1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
- extract data from output.xml files generated by Jenkins jobs and store in
  pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
22 import multiprocessing
28 from robot.api import ExecutionResult, ResultVisitor
29 from robot import errors
30 from collections import OrderedDict
31 from string import replace
33 from os.path import join
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
39 from input_data_files import download_and_unzip_data_file
40 from utils import Worker
43 # Separator used in file names
47 class ExecutionChecker(ResultVisitor):
48 """Class to traverse through the test suite structure.
50 The functionality implemented in this class generates a json structure:
56 "generated": "Timestamp",
57 "version": "SUT version",
58 "job": "Jenkins job name",
59 "build": "Information about the build"
62 "Suite long name 1": {
64 "doc": "Suite 1 documentation",
65 "parent": "Suite 1 parent",
66 "level": "Level of the suite in the suite hierarchy"
68 "Suite long name N": {
70 "doc": "Suite N documentation",
71 "parent": "Suite 2 parent",
72 "level": "Level of the suite in the suite hierarchy"
79 "parent": "Name of the parent of the test",
80 "doc": "Test documentation",
81 "msg": "Test message",
82 "conf-history": "DUT1 and DUT2 VAT History",
83 "show-run": "Show Run",
84 "tags": ["tag 1", "tag 2", "tag n"],
86 "status": "PASS" | "FAIL",
128 "parent": "Name of the parent of the test",
129 "doc": "Test documentation",
130 "msg": "Test message",
131 "tags": ["tag 1", "tag 2", "tag n"],
133 "status": "PASS" | "FAIL",
140 "parent": "Name of the parent of the test",
141 "doc": "Test documentation",
142 "msg": "Test message",
143 "tags": ["tag 1", "tag 2", "tag n"],
144 "type": "MRR" | "BMRR",
145 "status": "PASS" | "FAIL",
147 "receive-rate": AvgStdevMetadata,
151 # TODO: Remove when definitely no NDRPDRDISC tests are used:
155 "parent": "Name of the parent of the test",
156 "doc": "Test documentation",
157 "msg": "Test message",
158 "tags": ["tag 1", "tag 2", "tag n"],
159 "type": "PDR" | "NDR",
160 "status": "PASS" | "FAIL",
161 "throughput": { # Only type: "PDR" | "NDR"
163 "unit": "pps" | "bps" | "percentage"
165 "latency": { # Only type: "PDR" | "NDR"
172 "50": { # Only for NDR
177 "10": { # Only for NDR
189 "50": { # Only for NDR
194 "10": { # Only for NDR
201 "lossTolerance": "lossTolerance", # Only type: "PDR"
202 "conf-history": "DUT1 and DUT2 VAT History"
203 "show-run": "Show Run"
215 "metadata": { # Optional
216 "version": "VPP version",
217 "job": "Jenkins job name",
218 "build": "Information about the build"
222 "doc": "Suite 1 documentation",
223 "parent": "Suite 1 parent",
224 "level": "Level of the suite in the suite hierarchy"
227 "doc": "Suite N documentation",
228 "parent": "Suite 2 parent",
229 "level": "Level of the suite in the suite hierarchy"
235 "parent": "Name of the parent of the test",
236 "doc": "Test documentation"
237 "msg": "Test message"
238 "tags": ["tag 1", "tag 2", "tag n"],
239 "conf-history": "DUT1 and DUT2 VAT History"
240 "show-run": "Show Run"
241 "status": "PASS" | "FAIL"
249 .. note:: ID is the lowercase full path to the test.
252 # TODO: Remove when definitely no NDRPDRDISC tests are used:
253 REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
255 REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
256 r'PLRsearch upper bound::\s(\d+.\d+)')
258 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
259 r'NDR_UPPER:\s(\d+.\d+).*\n'
260 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
261 r'PDR_UPPER:\s(\d+.\d+)')
263 # TODO: Remove when definitely no NDRPDRDISC tests are used:
264 REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
265 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
266 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
267 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
268 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
269 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
270 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
272 REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
273 r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
274 r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
276 REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
277 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
279 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
282 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
283 r"VPP Version:\s*|VPP version:\s*)(.*)")
285 REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
286 r"(RTE Version: 'DPDK )(.*)(')")
288 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
290 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
291 r'tx\s(\d*),\srx\s(\d*)')
293 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
294 r' in packets per second: \[(.*)\]')
296 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
298 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
300 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
302 REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
304 def __init__(self, metadata, mapping, ignore):
307 :param metadata: Key-value pairs to be included in "metadata" part of
309 :param mapping: Mapping of the old names of test cases to the new
311 :param ignore: List of TCs to be ignored.
317 # Type of message to parse out from the test messages
318 self._msg_type = None
324 self._timestamp = None
326 # Testbed. The testbed is identified by TG node IP address.
329 # Mapping of TCs long names
330 self._mapping = mapping
333 self._ignore = ignore
335 # Number of VAT History messages found:
337 # 1 - VAT History of DUT1
338 # 2 - VAT History of DUT2
339 self._lookup_kw_nr = 0
340 self._conf_history_lookup_nr = 0
342 # Number of Show Running messages found
344 # 1 - Show run message found
345 self._show_run_lookup_nr = 0
347 # Test ID of currently processed test- the lowercase full path to the
351 # The main data structure
353 "metadata": OrderedDict(),
354 "suites": OrderedDict(),
355 "tests": OrderedDict()
358 # Save the provided metadata
359 for key, val in metadata.items():
360 self._data["metadata"][key] = val
362 # Dictionary defining the methods used to parse different types of
365 "timestamp": self._get_timestamp,
366 "vpp-version": self._get_vpp_version,
367 "dpdk-version": self._get_dpdk_version,
368 "teardown-vat-history": self._get_vat_history,
369 "teardown-papi-history": self._get_papi_history,
370 "test-show-runtime": self._get_show_run,
371 "testbed": self._get_testbed
376 """Getter - Data parsed from the XML file.
378 :returns: Data parsed from the XML file.
383 def _get_testbed(self, msg):
384 """Called when extraction of testbed IP is required.
385 The testbed is identified by TG node IP address.
387 :param msg: Message to process.
392 if msg.message.count("Arguments:"):
393 message = str(msg.message).replace(' ', '').replace('\n', '').\
394 replace("'", '"').replace('b"', '"').\
395 replace("honeycom", "honeycomb")
396 message = loads(message[11:-1])
398 self._testbed = message["TG"]["host"]
399 except (KeyError, ValueError):
402 self._data["metadata"]["testbed"] = self._testbed
403 self._msg_type = None
405 def _get_vpp_version(self, msg):
406 """Called when extraction of VPP version is required.
408 :param msg: Message to process.
413 if msg.message.count("return STDOUT Version:") or \
414 msg.message.count("VPP Version:") or \
415 msg.message.count("VPP version:"):
416 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
418 self._data["metadata"]["version"] = self._version
419 self._msg_type = None
421 def _get_dpdk_version(self, msg):
422 """Called when extraction of DPDK version is required.
424 :param msg: Message to process.
429 if msg.message.count("return STDOUT testpmd"):
431 self._version = str(re.search(
432 self.REGEX_VERSION_DPDK, msg.message). group(4))
433 self._data["metadata"]["version"] = self._version
437 self._msg_type = None
439 def _get_timestamp(self, msg):
440 """Called when extraction of timestamp is required.
442 :param msg: Message to process.
447 self._timestamp = msg.timestamp[:14]
448 self._data["metadata"]["generated"] = self._timestamp
449 self._msg_type = None
451 def _get_vat_history(self, msg):
452 """Called when extraction of VAT command history is required.
454 :param msg: Message to process.
458 if msg.message.count("VAT command history:"):
459 self._conf_history_lookup_nr += 1
460 if self._conf_history_lookup_nr == 1:
461 self._data["tests"][self._test_ID]["conf-history"] = str()
463 self._msg_type = None
464 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
465 "VAT command history:", "", msg.message, count=1). \
466 replace("\n\n", "\n").replace('\n', ' |br| ').\
467 replace('\r', '').replace('"', "'")
469 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
470 self._data["tests"][self._test_ID]["conf-history"] += \
471 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
473 def _get_papi_history(self, msg):
474 """Called when extraction of PAPI command history is required.
476 :param msg: Message to process.
480 if msg.message.count("PAPI command history:"):
481 self._conf_history_lookup_nr += 1
482 if self._conf_history_lookup_nr == 1:
483 self._data["tests"][self._test_ID]["conf-history"] = str()
485 self._msg_type = None
486 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
487 "PAPI command history:", "", msg.message, count=1). \
488 replace("\n\n", "\n").replace('\n', ' |br| ').\
489 replace('\r', '').replace('"', "'")
491 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
492 self._data["tests"][self._test_ID]["conf-history"] += \
493 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
495 def _get_show_run(self, msg):
496 """Called when extraction of VPP operational data (output of CLI command
497 Show Runtime) is required.
499 :param msg: Message to process.
503 if msg.message.count("return STDOUT Thread "):
504 self._show_run_lookup_nr += 1
505 if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
506 self._data["tests"][self._test_ID]["show-run"] = str()
507 if self._lookup_kw_nr > 1:
508 self._msg_type = None
509 if self._show_run_lookup_nr == 1:
510 text = msg.message.replace("vat# ", "").\
511 replace("return STDOUT ", "").replace("\n\n", "\n").\
512 replace('\n', ' |br| ').\
513 replace('\r', '').replace('"', "'")
515 self._data["tests"][self._test_ID]["show-run"] += " |br| "
516 self._data["tests"][self._test_ID]["show-run"] += \
517 "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
521 # TODO: Remove when definitely no NDRPDRDISC tests are used:
522 def _get_latency(self, msg, test_type):
523 """Get the latency data from the test message.
525 :param msg: Message to be parsed.
526 :param test_type: Type of the test - NDR or PDR.
529 :returns: Latencies parsed from the message.
533 if test_type == "NDR":
534 groups = re.search(self.REGEX_LAT_NDR, msg)
535 groups_range = range(1, 7)
536 elif test_type == "PDR":
537 groups = re.search(self.REGEX_LAT_PDR, msg)
538 groups_range = range(1, 3)
543 for idx in groups_range:
545 lat = [int(item) for item in str(groups.group(idx)).split('/')]
546 except (AttributeError, ValueError):
548 latencies.append(lat)
550 keys = ("min", "avg", "max")
558 latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
559 latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
560 if test_type == "NDR":
561 latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
562 latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
563 latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
564 latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
568 def _get_ndrpdr_throughput(self, msg):
569 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
572 :param msg: The test message to be parsed.
574 :returns: Parsed data as a dict and the status (PASS/FAIL).
575 :rtype: tuple(dict, str)
579 "NDR": {"LOWER": -1.0, "UPPER": -1.0},
580 "PDR": {"LOWER": -1.0, "UPPER": -1.0}
583 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
585 if groups is not None:
587 throughput["NDR"]["LOWER"] = float(groups.group(1))
588 throughput["NDR"]["UPPER"] = float(groups.group(2))
589 throughput["PDR"]["LOWER"] = float(groups.group(3))
590 throughput["PDR"]["UPPER"] = float(groups.group(4))
592 except (IndexError, ValueError):
595 return throughput, status
597 def _get_plr_throughput(self, msg):
598 """Get PLRsearch lower bound and PLRsearch upper bound from the test
601 :param msg: The test message to be parsed.
603 :returns: Parsed data as a dict and the status (PASS/FAIL).
604 :rtype: tuple(dict, str)
612 groups = re.search(self.REGEX_PLR_RATE, msg)
614 if groups is not None:
616 throughput["LOWER"] = float(groups.group(1))
617 throughput["UPPER"] = float(groups.group(2))
619 except (IndexError, ValueError):
622 return throughput, status
624 def _get_ndrpdr_latency(self, msg):
625 """Get LATENCY from the test message.
627 :param msg: The test message to be parsed.
629 :returns: Parsed data as a dict and the status (PASS/FAIL).
630 :rtype: tuple(dict, str)
635 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
636 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
639 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
640 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
644 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
646 if groups is not None:
647 keys = ("min", "avg", "max")
649 latency["NDR"]["direction1"] = dict(
650 zip(keys, [float(l) for l in groups.group(1).split('/')]))
651 latency["NDR"]["direction2"] = dict(
652 zip(keys, [float(l) for l in groups.group(2).split('/')]))
653 latency["PDR"]["direction1"] = dict(
654 zip(keys, [float(l) for l in groups.group(3).split('/')]))
655 latency["PDR"]["direction2"] = dict(
656 zip(keys, [float(l) for l in groups.group(4).split('/')]))
658 except (IndexError, ValueError):
661 return latency, status
663 def visit_suite(self, suite):
664 """Implements traversing through the suite and its direct children.
666 :param suite: Suite to process.
670 if self.start_suite(suite) is not False:
671 suite.suites.visit(self)
672 suite.tests.visit(self)
673 self.end_suite(suite)
675 def start_suite(self, suite):
676 """Called when suite starts.
678 :param suite: Suite to process.
684 parent_name = suite.parent.name
685 except AttributeError:
688 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
689 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
690 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
692 self._data["suites"][suite.longname.lower().replace('"', "'").
693 replace(" ", "_")] = {
694 "name": suite.name.lower(),
696 "parent": parent_name,
697 "level": len(suite.longname.split("."))
700 suite.keywords.visit(self)
702 def end_suite(self, suite):
703 """Called when suite ends.
705 :param suite: Suite to process.
711 def visit_test(self, test):
712 """Implements traversing through the test.
714 :param test: Test to process.
718 if self.start_test(test) is not False:
719 test.keywords.visit(self)
722 def start_test(self, test):
723 """Called when test starts.
725 :param test: Test to process.
730 longname_orig = test.longname.lower()
732 # Check the ignore list
733 if longname_orig in self._ignore:
736 tags = [str(tag) for tag in test.tags]
739 # Change the TC long name and name if defined in the mapping table
740 longname = self._mapping.get(longname_orig, None)
741 if longname is not None:
742 name = longname.split('.')[-1]
743 logging.debug("{0}\n{1}\n{2}\n{3}".format(
744 self._data["metadata"], longname_orig, longname, name))
746 longname = longname_orig
747 name = test.name.lower()
749 # Remove TC number from the TC long name (backward compatibility):
750 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
751 # Remove TC number from the TC name (not needed):
752 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
754 test_result["parent"] = test.parent.name.lower()
755 test_result["tags"] = tags
756 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
757 replace('\r', '').replace('[', ' |br| [')
758 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
759 test_result["msg"] = test.message.replace('\n', ' |br| '). \
760 replace('\r', '').replace('"', "'")
761 test_result["type"] = "FUNC"
762 test_result["status"] = test.status
764 if "PERFTEST" in tags:
765 # Replace info about cores (e.g. -1c-) with the info about threads
766 # and cores (e.g. -1t1c-) in the long test case names and in the
767 # test case names if necessary.
768 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
771 for tag in test_result["tags"]:
772 groups = re.search(self.REGEX_TC_TAG, tag)
778 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
779 "-{0}-".format(tag_tc.lower()),
782 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
783 "-{0}-".format(tag_tc.lower()),
787 test_result["status"] = "FAIL"
788 self._data["tests"][self._test_ID] = test_result
789 logging.debug("The test '{0}' has no or more than one "
790 "multi-threading tags.".format(self._test_ID))
791 logging.debug("Tags: {0}".format(test_result["tags"]))
794 if test.status == "PASS" and ("NDRPDRDISC" in tags or
800 # TODO: Remove when definitely no NDRPDRDISC tests are used:
801 if "NDRDISC" in tags:
802 test_result["type"] = "NDR"
803 # TODO: Remove when definitely no NDRPDRDISC tests are used:
804 elif "PDRDISC" in tags:
805 test_result["type"] = "PDR"
806 elif "NDRPDR" in tags:
807 test_result["type"] = "NDRPDR"
809 test_result["type"] = "SOAK"
811 test_result["type"] = "TCP"
813 test_result["type"] = "MRR"
814 elif "FRMOBL" in tags or "BMRR" in tags:
815 test_result["type"] = "BMRR"
817 test_result["status"] = "FAIL"
818 self._data["tests"][self._test_ID] = test_result
821 # TODO: Remove when definitely no NDRPDRDISC tests are used:
822 if test_result["type"] in ("NDR", "PDR"):
824 rate_value = str(re.search(
825 self.REGEX_RATE, test.message).group(1))
826 except AttributeError:
829 rate_unit = str(re.search(
830 self.REGEX_RATE, test.message).group(2))
831 except AttributeError:
834 test_result["throughput"] = dict()
835 test_result["throughput"]["value"] = \
836 int(rate_value.split('.')[0])
837 test_result["throughput"]["unit"] = rate_unit
838 test_result["latency"] = \
839 self._get_latency(test.message, test_result["type"])
840 if test_result["type"] == "PDR":
841 test_result["lossTolerance"] = str(re.search(
842 self.REGEX_TOLERANCE, test.message).group(1))
844 elif test_result["type"] in ("NDRPDR", ):
845 test_result["throughput"], test_result["status"] = \
846 self._get_ndrpdr_throughput(test.message)
847 test_result["latency"], test_result["status"] = \
848 self._get_ndrpdr_latency(test.message)
850 elif test_result["type"] in ("SOAK", ):
851 test_result["throughput"], test_result["status"] = \
852 self._get_plr_throughput(test.message)
854 elif test_result["type"] in ("TCP", ):
855 groups = re.search(self.REGEX_TCP, test.message)
856 test_result["result"] = int(groups.group(2))
858 elif test_result["type"] in ("MRR", "BMRR"):
859 test_result["result"] = dict()
860 groups = re.search(self.REGEX_BMRR, test.message)
861 if groups is not None:
862 items_str = groups.group(1)
863 items_float = [float(item.strip()) for item
864 in items_str.split(",")]
865 metadata = AvgStdevMetadataFactory.from_data(items_float)
866 # Next two lines have been introduced in CSIT-1179,
867 # to be removed in CSIT-1180.
870 test_result["result"]["receive-rate"] = metadata
872 groups = re.search(self.REGEX_MRR, test.message)
873 test_result["result"]["receive-rate"] = \
874 AvgStdevMetadataFactory.from_data([
875 float(groups.group(3)) / float(groups.group(1)), ])
877 self._data["tests"][self._test_ID] = test_result
879 def end_test(self, test):
880 """Called when test ends.
882 :param test: Test to process.
888 def visit_keyword(self, keyword):
889 """Implements traversing through the keyword and its child keywords.
891 :param keyword: Keyword to process.
892 :type keyword: Keyword
895 if self.start_keyword(keyword) is not False:
896 self.end_keyword(keyword)
898 def start_keyword(self, keyword):
899 """Called when keyword starts. Default implementation does nothing.
901 :param keyword: Keyword to process.
902 :type keyword: Keyword
906 if keyword.type == "setup":
907 self.visit_setup_kw(keyword)
908 elif keyword.type == "teardown":
909 self._lookup_kw_nr = 0
910 self.visit_teardown_kw(keyword)
912 self._lookup_kw_nr = 0
913 self.visit_test_kw(keyword)
914 except AttributeError:
917 def end_keyword(self, keyword):
918 """Called when keyword ends. Default implementation does nothing.
920 :param keyword: Keyword to process.
921 :type keyword: Keyword
926 def visit_test_kw(self, test_kw):
927 """Implements traversing through the test keyword and its child
930 :param test_kw: Keyword to process.
931 :type test_kw: Keyword
934 for keyword in test_kw.keywords:
935 if self.start_test_kw(keyword) is not False:
936 self.visit_test_kw(keyword)
937 self.end_test_kw(keyword)
939 def start_test_kw(self, test_kw):
940 """Called when test keyword starts. Default implementation does
943 :param test_kw: Keyword to process.
944 :type test_kw: Keyword
947 if test_kw.name.count("Show Runtime Counters On All Duts"):
948 self._lookup_kw_nr += 1
949 self._show_run_lookup_nr = 0
950 self._msg_type = "test-show-runtime"
951 elif test_kw.name.count("Start The L2fwd Test") and not self._version:
952 self._msg_type = "dpdk-version"
955 test_kw.messages.visit(self)
957 def end_test_kw(self, test_kw):
958 """Called when keyword ends. Default implementation does nothing.
960 :param test_kw: Keyword to process.
961 :type test_kw: Keyword
966 def visit_setup_kw(self, setup_kw):
967 """Implements traversing through the teardown keyword and its child
970 :param setup_kw: Keyword to process.
971 :type setup_kw: Keyword
974 for keyword in setup_kw.keywords:
975 if self.start_setup_kw(keyword) is not False:
976 self.visit_setup_kw(keyword)
977 self.end_setup_kw(keyword)
979 def start_setup_kw(self, setup_kw):
980 """Called when teardown keyword starts. Default implementation does
983 :param setup_kw: Keyword to process.
984 :type setup_kw: Keyword
987 if setup_kw.name.count("Show Vpp Version On All Duts") \
988 and not self._version:
989 self._msg_type = "vpp-version"
991 elif setup_kw.name.count("Setup performance global Variables") \
992 and not self._timestamp:
993 self._msg_type = "timestamp"
994 elif setup_kw.name.count("Setup Framework") and not self._testbed:
995 self._msg_type = "testbed"
998 setup_kw.messages.visit(self)
1000 def end_setup_kw(self, setup_kw):
1001 """Called when keyword ends. Default implementation does nothing.
1003 :param setup_kw: Keyword to process.
1004 :type setup_kw: Keyword
1009 def visit_teardown_kw(self, teardown_kw):
1010 """Implements traversing through the teardown keyword and its child
1013 :param teardown_kw: Keyword to process.
1014 :type teardown_kw: Keyword
1017 for keyword in teardown_kw.keywords:
1018 if self.start_teardown_kw(keyword) is not False:
1019 self.visit_teardown_kw(keyword)
1020 self.end_teardown_kw(keyword)
1022 def start_teardown_kw(self, teardown_kw):
1023 """Called when teardown keyword starts. Default implementation does
1026 :param teardown_kw: Keyword to process.
1027 :type teardown_kw: Keyword
1031 if teardown_kw.name.count("Show Vat History On All Duts"):
1032 self._conf_history_lookup_nr = 0
1033 self._msg_type = "teardown-vat-history"
1034 teardown_kw.messages.visit(self)
1035 elif teardown_kw.name.count("Show Papi History On All Duts"):
1036 self._conf_history_lookup_nr = 0
1037 self._msg_type = "teardown-papi-history"
1038 teardown_kw.messages.visit(self)
1040 def end_teardown_kw(self, teardown_kw):
1041 """Called when keyword ends. Default implementation does nothing.
1043 :param teardown_kw: Keyword to process.
1044 :type teardown_kw: Keyword
1049 def visit_message(self, msg):
1050 """Implements visiting the message.
1052 :param msg: Message to process.
1056 if self.start_message(msg) is not False:
1057 self.end_message(msg)
1059 def start_message(self, msg):
1060 """Called when message starts. Get required information from messages:
1063 :param msg: Message to process.
1069 self.parse_msg[self._msg_type](msg)
1071 def end_message(self, msg):
1072 """Called when message ends. Default implementation does nothing.
1074 :param msg: Message to process.
1081 class InputData(object):
1084 The data is extracted from output.xml files generated by Jenkins jobs and
1085 stored in pandas' DataFrames.
1091 (as described in ExecutionChecker documentation)
1093 (as described in ExecutionChecker documentation)
1095 (as described in ExecutionChecker documentation)
1098 def __init__(self, spec):
1101 :param spec: Specification.
1102 :type spec: Specification
1109 self._input_data = pd.Series()
1113 """Getter - Input data.
1115 :returns: Input data
1116 :rtype: pandas.Series
1118 return self._input_data
1120 def metadata(self, job, build):
1121 """Getter - metadata
1123 :param job: Job which metadata we want.
1124 :param build: Build which metadata we want.
1128 :rtype: pandas.Series
1131 return self.data[job][build]["metadata"]
1133 def suites(self, job, build):
1136 :param job: Job which suites we want.
1137 :param build: Build which suites we want.
1141 :rtype: pandas.Series
1144 return self.data[job][str(build)]["suites"]
1146 def tests(self, job, build):
1149 :param job: Job which tests we want.
1150 :param build: Build which tests we want.
1154 :rtype: pandas.Series
1157 return self.data[job][build]["tests"]
1159 def _parse_tests(self, job, build, log):
1160 """Process data from robot output.xml file and return JSON structured
1163 :param job: The name of job which build output data will be processed.
1164 :param build: The build which output data will be processed.
1165 :param log: List of log messages.
1168 :type log: list of tuples (severity, msg)
1169 :returns: JSON data structure.
1178 with open(build["file-name"], 'r') as data_file:
1180 result = ExecutionResult(data_file)
1181 except errors.DataError as err:
1182 log.append(("ERROR", "Error occurred while parsing output.xml: "
1185 checker = ExecutionChecker(metadata, self._cfg.mapping,
1187 result.visit(checker)
1191 def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
1192 """Download and parse the input data file.
1194 :param pid: PID of the process executing this method.
1195 :param data_queue: Shared memory between processes. Queue which keeps
1196 the result data. This data is then read by the main process and used
1197 in further processing.
1198 :param job: Name of the Jenkins job which generated the processed input
1200 :param build: Information about the Jenkins build which generated the
1201 processed input file.
1202 :param repeat: Repeat the download specified number of times if not
1205 :type data_queue: multiprocessing.Manager().Queue()
1213 logging.info(" Processing the job/build: {0}: {1}".
1214 format(job, build["build"]))
1216 logs.append(("INFO", " Processing the job/build: {0}: {1}".
1217 format(job, build["build"])))
1224 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1230 logs.append(("ERROR", "It is not possible to download the input "
1231 "data file from the job '{job}', build "
1232 "'{build}', or it is damaged. Skipped.".
1233 format(job=job, build=build["build"])))
1235 logs.append(("INFO", " Processing data from the build '{0}' ...".
1236 format(build["build"])))
1237 data = self._parse_tests(job, build, logs)
1239 logs.append(("ERROR", "Input data file from the job '{job}', "
1240 "build '{build}' is damaged. Skipped.".
1241 format(job=job, build=build["build"])))
1246 remove(build["file-name"])
1247 except OSError as err:
1248 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1249 format(build["file-name"], repr(err))))
1251 # If the time-period is defined in the specification file, remove all
1252 # files which are outside the time period.
1253 timeperiod = self._cfg.input.get("time-period", None)
1254 if timeperiod and data:
1256 timeperiod = timedelta(int(timeperiod))
1257 metadata = data.get("metadata", None)
1259 generated = metadata.get("generated", None)
1261 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1262 if (now - generated) > timeperiod:
1263 # Remove the data and the file:
1268 " The build {job}/{build} is outdated, will be "
1269 "removed".format(job=job, build=build["build"])))
1270 file_name = self._cfg.input["file-name"]
1272 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
1273 "{job}{sep}{build}{sep}{name}".
1276 build=build["build"],
1280 logs.append(("INFO",
1281 " The file {name} has been removed".
1282 format(name=full_name)))
1283 except OSError as err:
1284 logs.append(("ERROR",
1285 "Cannot remove the file '{0}': {1}".
1286 format(full_name, repr(err))))
1288 logs.append(("INFO", " Done."))
1297 data_queue.put(result)
1299 def download_and_parse_data(self, repeat=1):
1300 """Download the input data files, parse input data from input files and
1301 store in pandas' Series.
1303 :param repeat: Repeat the download specified number of times if not
1308 logging.info("Downloading and parsing input files ...")
1310 work_queue = multiprocessing.JoinableQueue()
1311 manager = multiprocessing.Manager()
1312 data_queue = manager.Queue()
1313 cpus = multiprocessing.cpu_count()
1316 for cpu in range(cpus):
1317 worker = Worker(work_queue,
1319 self._download_and_parse_build)
1320 worker.daemon = True
1322 workers.append(worker)
1323 os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
1324 format(cpu, worker.pid))
1326 for job, builds in self._cfg.builds.items():
1327 for build in builds:
1328 work_queue.put((job, build, repeat))
1332 logging.info("Done.")
1334 while not data_queue.empty():
1335 result = data_queue.get()
1338 build_nr = result["build"]["build"]
1341 data = result["data"]
1342 build_data = pd.Series({
1343 "metadata": pd.Series(data["metadata"].values(),
1344 index=data["metadata"].keys()),
1345 "suites": pd.Series(data["suites"].values(),
1346 index=data["suites"].keys()),
1347 "tests": pd.Series(data["tests"].values(),
1348 index=data["tests"].keys())})
1350 if self._input_data.get(job, None) is None:
1351 self._input_data[job] = pd.Series()
1352 self._input_data[job][str(build_nr)] = build_data
1354 self._cfg.set_input_file_name(job, build_nr,
1355 result["build"]["file-name"])
1357 self._cfg.set_input_state(job, build_nr, result["state"])
1359 for item in result["logs"]:
1360 if item[0] == "INFO":
1361 logging.info(item[1])
1362 elif item[0] == "ERROR":
1363 logging.error(item[1])
1364 elif item[0] == "DEBUG":
1365 logging.debug(item[1])
1366 elif item[0] == "CRITICAL":
1367 logging.critical(item[1])
1368 elif item[0] == "WARNING":
1369 logging.warning(item[1])
1373 # Terminate all workers
1374 for worker in workers:
1378 logging.info("Done.")
1381 def _end_of_tag(tag_filter, start=0, closer="'"):
1382 """Return the index of character in the string which is the end of tag.
1384 :param tag_filter: The string where the end of tag is being searched.
1385 :param start: The index where the searching is stated.
1386 :param closer: The character which is the tag closer.
1387 :type tag_filter: str
1390 :returns: The index of the tag closer.
1395 idx_opener = tag_filter.index(closer, start)
1396 return tag_filter.index(closer, idx_opener + 1)
1401 def _condition(tag_filter):
1402 """Create a conditional statement from the given tag filter.
1404 :param tag_filter: Filter based on tags from the element specification.
1405 :type tag_filter: str
1406 :returns: Conditional statement which can be evaluated.
1412 index = InputData._end_of_tag(tag_filter, index)
1416 tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
1418 def filter_data(self, element, params=None, data_set="tests",
1419 continue_on_error=False):
1420 """Filter required data from the given jobs and builds.
1422 The output data structure is:
1426 - test (or suite) 1 ID:
1432 - test (or suite) n ID:
1439 :param element: Element which will use the filtered data.
1440 :param params: Parameters which will be included in the output. If None,
1441 all parameters are included.
1442 :param data_set: The set of data to be filtered: tests, suites,
1444 :param continue_on_error: Continue if there is error while reading the
1445 data. The Item will be empty then
1446 :type element: pandas.Series
1449 :type continue_on_error: bool
1450 :returns: Filtered data.
1451 :rtype pandas.Series
1455 if element["filter"] in ("all", "template"):
1458 cond = InputData._condition(element["filter"])
1459 logging.debug(" Filter: {0}".format(cond))
1461 logging.error(" No filter defined.")
1465 params = element.get("parameters", None)
1467 params.append("type")
1471 for job, builds in element["data"].items():
1472 data[job] = pd.Series()
1473 for build in builds:
1474 data[job][str(build)] = pd.Series()
1476 data_iter = self.data[job][str(build)][data_set].\
1479 if continue_on_error:
1483 for test_ID, test_data in data_iter:
1484 if eval(cond, {"tags": test_data.get("tags", "")}):
1485 data[job][str(build)][test_ID] = pd.Series()
1487 for param, val in test_data.items():
1488 data[job][str(build)][test_ID][param] = val
1490 for param in params:
1492 data[job][str(build)][test_ID][param] =\
1495 data[job][str(build)][test_ID][param] =\
1499 except (KeyError, IndexError, ValueError) as err:
1500 logging.error(" Missing mandatory parameter in the element "
1501 "specification: {0}".format(err))
1503 except AttributeError:
1506 logging.error(" The filter '{0}' is not correct. Check if all "
1507 "tags are enclosed by apostrophes.".format(cond))
1511 def merge_data(data):
1512 """Merge data from more jobs and builds to a simple data structure.
1514 The output data structure is:
1516 - test (suite) 1 ID:
1522 - test (suite) n ID:
1525 :param data: Data to merge.
1526 :type data: pandas.Series
1527 :returns: Merged data.
1528 :rtype: pandas.Series
1531 logging.info(" Merging data ...")
1533 merged_data = pd.Series()
1534 for _, builds in data.iteritems():
1535 for _, item in builds.iteritems():
1536 for ID, item_data in item.iteritems():
1537 merged_data[ID] = item_data