1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
22 import multiprocessing
28 from robot.api import ExecutionResult, ResultVisitor
29 from robot import errors
30 from collections import OrderedDict
31 from string import replace
33 from os.path import join
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
39 from input_data_files import download_and_unzip_data_file
40 from utils import Worker
43 # Separator used in file names
47 class ExecutionChecker(ResultVisitor):
48 """Class to traverse through the test suite structure.
50 The functionality implemented in this class generates a json structure:
56 "generated": "Timestamp",
57 "version": "SUT version",
58 "job": "Jenkins job name",
59 "build": "Information about the build"
62 "Suite long name 1": {
64 "doc": "Suite 1 documentation",
65 "parent": "Suite 1 parent",
66 "level": "Level of the suite in the suite hierarchy"
68 "Suite long name N": {
70 "doc": "Suite N documentation",
71 "parent": "Suite 2 parent",
72 "level": "Level of the suite in the suite hierarchy"
79 "parent": "Name of the parent of the test",
80 "doc": "Test documentation",
81 "msg": "Test message",
82 "conf-history": "DUT1 and DUT2 VAT History",
83 "show-run": "Show Run",
84 "tags": ["tag 1", "tag 2", "tag n"],
86 "status": "PASS" | "FAIL",
128 "parent": "Name of the parent of the test",
129 "doc": "Test documentation",
130 "msg": "Test message",
131 "tags": ["tag 1", "tag 2", "tag n"],
133 "status": "PASS" | "FAIL",
140 "parent": "Name of the parent of the test",
141 "doc": "Test documentation",
142 "msg": "Test message",
143 "tags": ["tag 1", "tag 2", "tag n"],
144 "type": "MRR" | "BMRR",
145 "status": "PASS" | "FAIL",
147 "receive-rate": AvgStdevMetadata,
151 # TODO: Remove when definitely no NDRPDRDISC tests are used:
155 "parent": "Name of the parent of the test",
156 "doc": "Test documentation",
157 "msg": "Test message",
158 "tags": ["tag 1", "tag 2", "tag n"],
159 "type": "PDR" | "NDR",
160 "status": "PASS" | "FAIL",
161 "throughput": { # Only type: "PDR" | "NDR"
163 "unit": "pps" | "bps" | "percentage"
165 "latency": { # Only type: "PDR" | "NDR"
172 "50": { # Only for NDR
177 "10": { # Only for NDR
189 "50": { # Only for NDR
194 "10": { # Only for NDR
201 "lossTolerance": "lossTolerance", # Only type: "PDR"
202 "conf-history": "DUT1 and DUT2 VAT History"
203 "show-run": "Show Run"
215 "metadata": { # Optional
216 "version": "VPP version",
217 "job": "Jenkins job name",
218 "build": "Information about the build"
222 "doc": "Suite 1 documentation",
223 "parent": "Suite 1 parent",
224 "level": "Level of the suite in the suite hierarchy"
227 "doc": "Suite N documentation",
228 "parent": "Suite 2 parent",
229 "level": "Level of the suite in the suite hierarchy"
235 "parent": "Name of the parent of the test",
236 "doc": "Test documentation"
237 "msg": "Test message"
238 "tags": ["tag 1", "tag 2", "tag n"],
239 "conf-history": "DUT1 and DUT2 VAT History"
240 "show-run": "Show Run"
241 "status": "PASS" | "FAIL"
249 .. note:: ID is the lowercase full path to the test.
    # Regular expressions used to pull measurement values out of free-form
    # test and keyword messages.
    # NOTE(review): this excerpt is elided; e.g. the continuation line of
    # REGEX_TOLERANCE is not visible here, leaving that call unterminated.

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    # Matches "FINAL_RATE: <value> <unit>".
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    # Matches the PLRsearch lower/upper bound lines (SOAK tests).
    REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
                                r'PLRsearch upper bound::\s(\d+.\d+)')

    # Matches the four NDR/PDR LOWER/UPPER rate lines.
    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    # Matches three "LAT_<n>%NDR: ['<min/avg/max>', '<min/avg/max>']" lines.
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    # Matches one "LAT_<n>%PDR: [...]" pair.
    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    # Matches the two LATENCY lines produced by NDRPDR tests.
    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    # NOTE(review): the continuation line of this pattern is elided in this
    # excerpt.
    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'

    # Matches the VPP version reported on stdout.
    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
                                   r"VPP Version:\s*)(.*)")

    # Matches the DPDK version reported by testpmd (group 4 is the version).
    REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
                                    r"(RTE Version: 'DPDK )(.*)(')")

    # Matches "Total rps|cps|throughput: <n>" (TCP tests).
    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    # Matches legacy MRR results: trial duration, tx and rx packet counts.
    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    # Matches the BMRR list of per-trial receive rates.
    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    # Thread/core tag, e.g. "2T1C".
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    # Old-style name fragment, e.g. "-1t1c-".
    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    # New-style name fragment, e.g. "-1c-".
    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    # Leading test-case number, e.g. "tc01-".
    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            the generated data structure.
        :param mapping: Mapping of the old names of test cases to the new
            names.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """
        # NOTE(review): this excerpt is elided; several original statements
        # (e.g. the "self._data = {" and "self.parse_msg = {" openers with
        # their closing braces, and initialisers such as self._version /
        # self._testbed / self._test_ID used elsewhere in this class) are
        # not visible here. Only the visible lines are reproduced below.

        # Type of message to parse out from the test messages
        self._msg_type = None

        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.

        # Mapping of TCs long names
        self._mapping = mapping

        self._ignore = ignore

        # Number of VAT History messages found:
        # 1 - VAT History of DUT1
        # 2 - VAT History of DUT2
        self._lookup_kw_nr = 0
        self._conf_history_lookup_nr = 0

        # Number of Show Running messages found
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of currently processed test- the lowercase full path to the

        # The main data structure
        # (dict opener elided in this excerpt)
                "metadata": OrderedDict(),
                "suites": OrderedDict(),
                "tests": OrderedDict()

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages; keys match the self._msg_type values set by the
        # start_*_kw() hooks below.
        # (dict opener elided in this excerpt)
                "timestamp": self._get_timestamp,
                "vpp-version": self._get_vpp_version,
                "dpdk-version": self._get_dpdk_version,
                "teardown-vat-history": self._get_vat_history,
                "teardown-papi-history": self._get_papi_history,
                "test-show-runtime": self._get_show_run,
                "testbed": self._get_testbed
376 """Getter - Data parsed from the XML file.
378 :returns: Data parsed from the XML file.
    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("Arguments:"):
            # Normalise the logged argument dump into parseable JSON.
            message = str(msg.message).replace(' ', '').replace('\n', '').\
                replace("'", '"').replace('b"', '"').\
                replace("honeycom", "honeycomb")
            # Strip the wrapper around the JSON payload before decoding.
            message = loads(message[11:-1])
            # NOTE(review): the enclosing "try:" line and the except-branch
            # body are elided in this excerpt.
                self._testbed = message["TG"]["host"]
            except (KeyError, ValueError):

            self._data["metadata"]["testbed"] = self._testbed
            self._msg_type = None
    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Version:") or \
            msg.message.count("VPP Version:"):
            # NOTE(review): the continuation line closing this expression
            # is elided in this excerpt, leaving the call unterminated here.
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
            self._data["metadata"]["version"] = self._version
            self._msg_type = None
    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT testpmd"):
            # NOTE(review): the enclosing "try:" line and the except-branch
            # are elided in this excerpt.
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message). group(4))
                self._data["metadata"]["version"] = self._version

            self._msg_type = None
    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        # Keep only the first 14 characters of the Robot timestamp.
        self._timestamp = msg.timestamp[:14]
        self._data["metadata"]["generated"] = self._timestamp
        self._msg_type = None
    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("VAT command history:"):
            # One message per DUT; the counter doubles as the DUT number.
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            # NOTE(review): an "else:" line is elided in this excerpt; the
            # assignment below belongs to that branch.
                self._msg_type = None
            # Strip the "<IP> VAT command history:" prefix and re-flow
            # newlines into the " |br| " markup used downstream.
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("PAPI command history:"):
            # One message per DUT; the counter doubles as the DUT number.
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["conf-history"] = str()
            # NOTE(review): an "else:" line is elided in this excerpt; the
            # assignment below belongs to that branch.
                self._msg_type = None
            # Strip the "<IP> PAPI command history:" prefix and re-flow
            # newlines into the " |br| " markup used downstream.
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "PAPI command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")

            self._data["tests"][self._test_ID]["conf-history"] += " |br| "
            self._data["tests"][self._test_ID]["conf-history"] += \
                "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Thread "):
            self._show_run_lookup_nr += 1
            # First show-run message of the first lookup keyword starts a
            # fresh record.
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                self._data["tests"][self._test_ID]["show-run"] = str()
            # Stop parsing after the first lookup keyword.
            if self._lookup_kw_nr > 1:
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                # Re-flow the CLI output into " |br| " markup.
                text = msg.message.replace("vat# ", "").\
                    replace("return STDOUT ", "").replace("\n\n", "\n").\
                    replace('\n', ' |br| ').\
                    replace('\r', '').replace('"', "'")

                self._data["tests"][self._test_ID]["show-run"] += " |br| "
                self._data["tests"][self._test_ID]["show-run"] += \
                    "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    def _get_latency(self, msg, test_type):
        """Get the latency data from the test message.

        :param msg: Message to be parsed.
        :param test_type: Type of the test - NDR or PDR.
        :type msg: str
        :type test_type: str
        :returns: Latencies parsed from the message.
        :rtype: dict
        """
        # NOTE(review): this excerpt is elided; the "latencies" list
        # initialiser, the fall-back else-branch, the "latency" dict
        # initialiser and the return statement are not visible here.

        if test_type == "NDR":
            # Six min/avg/max triples (three per direction).
            groups = re.search(self.REGEX_LAT_NDR, msg)
            groups_range = range(1, 7)
        elif test_type == "PDR":
            # Two min/avg/max triples (one per direction).
            groups = re.search(self.REGEX_LAT_PDR, msg)
            groups_range = range(1, 3)

        for idx in groups_range:
            # NOTE(review): the enclosing "try:" line and the except-branch
            # body are elided in this excerpt.
                lat = [int(item) for item in str(groups.group(idx)).split('/')]
            except (AttributeError, ValueError):

            latencies.append(lat)

        keys = ("min", "avg", "max")
        # NOTE(review): the "latency = {...}" initialiser is elided in this
        # excerpt.
        latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
        latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
        if test_type == "NDR":
            latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
            latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
            latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
            latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        # NOTE(review): this excerpt is elided; the "throughput = {" opener,
        # the initial/updated "status" assignments and the except-branch
        # body are not visible here. -1.0 marks a value not parsed.

            "NDR": {"LOWER": -1.0, "UPPER": -1.0},
            "PDR": {"LOWER": -1.0, "UPPER": -1.0}

        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            # NOTE(review): the "try:" line is elided in this excerpt.
                throughput["NDR"]["LOWER"] = float(groups.group(1))
                throughput["NDR"]["UPPER"] = float(groups.group(2))
                throughput["PDR"]["LOWER"] = float(groups.group(3))
                throughput["PDR"]["UPPER"] = float(groups.group(4))
            except (IndexError, ValueError):

        return throughput, status
    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        # NOTE(review): this excerpt is elided; the "throughput" initialiser,
        # the "status" assignments and the except-branch body are not
        # visible here.

        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            # NOTE(review): the "try:" line is elided in this excerpt.
                throughput["LOWER"] = float(groups.group(1))
                throughput["UPPER"] = float(groups.group(2))
            except (IndexError, ValueError):

        return throughput, status
    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        # NOTE(review): this excerpt is elided; the "latency = {" opener with
        # its "NDR"/"PDR" keys, the "status" assignments and the
        # except-branch body are not visible here. -1.0 marks a value not
        # parsed.

            "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
            "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}

            "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
            "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        if groups is not None:
            keys = ("min", "avg", "max")
            # NOTE(review): the "try:" line is elided in this excerpt.
                latency["NDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(1).split('/')]))
                latency["NDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(2).split('/')]))
                latency["PDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(3).split('/')]))
                latency["PDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(4).split('/')]))
            except (IndexError, ValueError):

        return latency, status
    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)
    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        # NOTE(review): this excerpt is elided; the "try:" line, the
        # except-branch body, the "doc" entry of the dict below and its
        # closing brace are not visible here.

            parent_name = suite.parent.name
        except AttributeError:

        # Re-flow the suite documentation into the " |br| " markup; the
        # py2-only string.replace() limits the first substitution
        # (maxreplace=1).
        doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
            replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
        doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)

        # Suites are keyed by their normalised long name.
        self._data["suites"][suite.longname.lower().replace('"', "'").
            replace(" ", "_")] = {
                "name": suite.name.lower(),
                "parent": parent_name,
                "level": len(suite.longname.split("."))

        suite.keywords.visit(self)
    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            # NOTE(review): subsequent line(s) of this method are elided in
            # this excerpt.
    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        # NOTE(review): this excerpt is heavily elided; among others the
        # "test_result" initialiser, several "try:" / "else:" / "elif" /
        # "return" lines and some continuation lines are not visible here.
        # Only the visible lines are reproduced and commented.

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:

        tags = [str(tag) for tag in test.tags]

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split('.')[-1]
            logging.debug("{0}\n{1}\n{2}\n{3}".format(
                self._data["metadata"], longname_orig, longname, name))
        # else-branch (line elided in this excerpt):
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        # py2-only string.replace(); undoes the first " |br| [" insertion.
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["type"] = "FUNC"
        test_result["status"] = test.status

        if "PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
            for tag in test_result["tags"]:
                groups = re.search(self.REGEX_TC_TAG, tag)
                    self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                           "-{0}-".format(tag_tc.lower()),
                    test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                 "-{0}-".format(tag_tc.lower()),
            # Tests with no (or ambiguous) threading tags are marked FAIL:
                    test_result["status"] = "FAIL"
                    self._data["tests"][self._test_ID] = test_result
                    logging.debug("The test '{0}' has no or more than one "
                                  "multi-threading tags.".format(self._test_ID))
                    logging.debug("Tags: {0}".format(test_result["tags"]))

        # Determine the test type from its tags (continuation of this
        # condition is elided in this excerpt):
        if test.status == "PASS" and ("NDRPDRDISC" in tags or
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if "NDRDISC" in tags:
                test_result["type"] = "NDR"
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            elif "PDRDISC" in tags:
                test_result["type"] = "PDR"
            elif "NDRPDR" in tags:
                test_result["type"] = "NDRPDR"
                test_result["type"] = "SOAK"
                test_result["type"] = "TCP"
                test_result["type"] = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_result["type"] = "BMRR"
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result

            # Parse the measurement results according to the test type:
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if test_result["type"] in ("NDR", "PDR"):
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:

                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_result["type"])
                if test_result["type"] == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_result["type"] in ("NDRPDR", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result["latency"], test_result["status"] = \
                    self._get_ndrpdr_latency(test.message)

            elif test_result["type"] in ("SOAK", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_plr_throughput(test.message)

            elif test_result["type"] in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = int(groups.group(2))

            elif test_result["type"] in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    metadata = AvgStdevMetadataFactory.from_data(items_float)
                    # Next two lines have been introduced in CSIT-1179,
                    # to be removed in CSIT-1180.
                    test_result["result"]["receive-rate"] = metadata
                # else-branch (line elided): legacy MRR message format.
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data([
                            float(groups.group(3)) / float(groups.group(1)), ])

        self._data["tests"][self._test_ID] = test_result
    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)
    def start_keyword(self, keyword):
        """Called when keyword starts. Dispatches to the setup / teardown /
        test keyword visitors according to the keyword type.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        # NOTE(review): the enclosing "try:" line, an "else:" line and the
        # except-branch body are elided in this excerpt.
            if keyword.type == "setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == "teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
            # else-branch (line elided in this excerpt):
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)
    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        # Select the message parser for this keyword's messages.
        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Start The L2fwd Test") and not self._version:
            self._msg_type = "dpdk-version"
        # NOTE(review): an else-branch is elided in this excerpt.
        test_kw.messages.visit(self)
    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)
    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        # Select the message parser; "not self._version" etc. ensure only
        # the first matching keyword is processed.
        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"
        # NOTE(review): line(s) elided in this excerpt.
        elif setup_kw.name.count("Setup performance global Variables") \
                and not self._timestamp:
            self._msg_type = "timestamp"
        elif setup_kw.name.count("Setup Framework") and not self._testbed:
            self._msg_type = "testbed"
        # NOTE(review): an else-branch is elided in this excerpt.
        setup_kw.messages.visit(self)
    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)
    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Default implementation does
        nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count("Show Vat History On All Duts"):
            # Reset the per-test DUT counter before visiting the messages.
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count("Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = "teardown-papi-history"
            teardown_kw.messages.visit(self)
    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)
    def start_message(self, msg):
        """Called when message starts. Get required information from messages.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # NOTE(review): the guard line(s) preceding this dispatch are elided
        # in this excerpt; parse_msg is the handler table built in __init__.
            self.parse_msg[self._msg_type](msg)
    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
1080 class InputData(object):
1083 The data is extracted from output.xml files generated by Jenkins jobs and
1084 stored in pandas' DataFrames.
1090 (as described in ExecutionChecker documentation)
1092 (as described in ExecutionChecker documentation)
1094 (as described in ExecutionChecker documentation)
    def __init__(self, spec):
        """Initialisation.

        :param spec: Specification.
        :type spec: Specification
        """
        # NOTE(review): assignment of the specification (self._cfg, used
        # throughout this class) is elided in this excerpt.

        # Data store:
        self._input_data = pd.Series()
1112 """Getter - Input data.
1114 :returns: Input data
1115 :rtype: pandas.Series
1117 return self._input_data
    def metadata(self, job, build):
        """Getter - metadata.

        :param job: Job which metadata we want.
        :param build: Build which metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata of the given job/build.
        :rtype: pandas.Series
        """

        return self.data[job][build]["metadata"]
    def suites(self, job, build):
        """Getter - suites.

        :param job: Job which suites we want.
        :param build: Build which suites we want.
        :type job: str
        :type build: str
        :returns: Suites of the given job/build.
        :rtype: pandas.Series
        """

        return self.data[job][str(build)]["suites"]
    def tests(self, job, build):
        """Getter - tests.

        :param job: Job which tests we want.
        :param build: Build which tests we want.
        :type job: str
        :type build: str
        :returns: Tests of the given job/build.
        :rtype: pandas.Series
        """

        return self.data[job][build]["tests"]
    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of job which build output data will be processed.
        :param build: The build which output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """
        # NOTE(review): this excerpt is elided; the "metadata" construction,
        # the "return" statements and several continuation lines are not
        # visible here.

        with open(build["file-name"], 'r') as data_file:
            # Parsing may fail on a damaged/truncated output.xml download.
            # NOTE(review): the "try:" line is elided in this excerpt.
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "

        checker = ExecutionChecker(metadata, self._cfg.mapping,
        result.visit(checker)
    def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
        """Download and parse the input data file.

        :param pid: PID of the process executing this method.
        :param data_queue: Shared memory between processes. Queue which keeps
            the result data. This data is then read by the main process and used
            in further processing.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
        :type job: str
        :type build: dict
        :type data_queue: multiprocessing.Manager().Queue()
        :type repeat: int
        """
        # NOTE(review): this excerpt is elided; the "logs"/"state"/"result"
        # initialisers, several "if"/"try:"/"else:" lines and some
        # continuation lines are not visible here. Only the visible lines
        # are reproduced and commented.

        logging.info(" Processing the job/build: {0}: {1}".
                     format(job, build["build"]))

        logs.append(("INFO", " Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        # Download the job's output file (remaining arguments elided):
        success = download_and_unzip_data_file(self._cfg, job, build, pid,

            logs.append(("ERROR", "It is not possible to download the input "
                                  "data file from the job '{job}', build "
                                  "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))

            logs.append(("INFO", " Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)

            logs.append(("ERROR", "Input data file from the job '{job}', "
                                  "build '{build}' is damaged. Skipped.".
                         format(job=job, build=build["build"])))

            # Best-effort removal of the downloaded file.
            remove(build["file-name"])
        except OSError as err:
            logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                         format(build["file-name"], repr(err))))

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get("time-period", None)
        if timeperiod and data:
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get("metadata", None)
                generated = metadata.get("generated", None)
                    generated = dt.strptime(generated, "%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        logs.append(("INFO",
                                     " The build {job}/{build} is outdated, will be "
                                     "removed".format(job=job, build=build["build"])))
                        file_name = self._cfg.input["file-name"]
                        # (join() call partially elided in this excerpt)
                        self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
                        "{job}{sep}{build}{sep}{name}".
                        build=build["build"],
                        logs.append(("INFO",
                                     " The file {name} has been removed".
                                     format(name=full_name)))
                        except OSError as err:
                            logs.append(("ERROR",
                                         "Cannot remove the file '{0}': {1}".
                                         format(full_name, repr(err))))

        logs.append(("INFO", " Done."))

        # Hand the per-build result back to the main process.
        data_queue.put(result)
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """
        # NOTE(review): this excerpt is elided; the "workers" initialiser,
        # worker start/terminate/join calls and a few other lines are not
        # visible here.

        logging.info("Downloading and parsing input files ...")

        work_queue = multiprocessing.JoinableQueue()
        manager = multiprocessing.Manager()
        data_queue = manager.Queue()
        cpus = multiprocessing.cpu_count()

        # One worker process per CPU, each pinned to its CPU with taskset.
        for cpu in range(cpus):
            worker = Worker(work_queue,
                            self._download_and_parse_build)
            worker.daemon = True
            workers.append(worker)
            os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
                      format(cpu, worker.pid))

        # Enqueue one work item per configured job/build.
        for job, builds in self._cfg.builds.items():
            for build in builds:
                work_queue.put((job, build, repeat))

        logging.info("Done.")

        # Drain the result queue and fold each build's parsed data into
        # the pandas structures.
        while not data_queue.empty():
            result = data_queue.get()

            build_nr = result["build"]["build"]

            data = result["data"]
            build_data = pd.Series({
                "metadata": pd.Series(data["metadata"].values(),
                                      index=data["metadata"].keys()),
                "suites": pd.Series(data["suites"].values(),
                                    index=data["suites"].keys()),
                "tests": pd.Series(data["tests"].values(),
                                   index=data["tests"].keys())})

            if self._input_data.get(job, None) is None:
                self._input_data[job] = pd.Series()
            self._input_data[job][str(build_nr)] = build_data

            self._cfg.set_input_file_name(job, build_nr,
                                          result["build"]["file-name"])

            self._cfg.set_input_state(job, build_nr, result["state"])

            # Replay the worker's buffered log records at their severity.
            for item in result["logs"]:
                if item[0] == "INFO":
                    logging.info(item[1])
                elif item[0] == "ERROR":
                    logging.error(item[1])
                elif item[0] == "DEBUG":
                    logging.debug(item[1])
                elif item[0] == "CRITICAL":
                    logging.critical(item[1])
                elif item[0] == "WARNING":
                    logging.warning(item[1])

        # Terminate all workers
        for worker in workers:

        logging.info("Done.")
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of character in the string which is the end of tag.

        :param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the searching is stated.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
        """
        # NOTE(review): the enclosing "try:" line and the except-branch are
        # elided in this excerpt. There is no "self" parameter -- presumably
        # a @staticmethod whose decorator line is likewise elided; confirm
        # against the full file.
            # Find the opening quote, then return the index of its pair.
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
        """
        # NOTE(review): the surrounding loop, index initialisation and
        # termination logic are elided in this excerpt; only the two
        # visible statements are reproduced below.

            # Advance past the current quoted tag.
            index = InputData._end_of_tag(tag_filter, index)

            # Append " in tags" after the quoted tag so the whole string
            # becomes an eval-able boolean expression.
            tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
    def filter_data(self, element, params=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              ...
            - test (or suite) n ID:
            ...

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """
        # NOTE(review): this excerpt is elided; "try:" lines, else-branches,
        # the "data" initialiser, several continuation lines and the return
        # statements are not visible here. Only the visible lines are
        # reproduced and commented.

            # "all"/"template" filters select everything; otherwise build
            # an eval-able condition from the tag expression.
            if element["filter"] in ("all", "template"):

                cond = InputData._condition(element["filter"])
            logging.debug(" Filter: {0}".format(cond))

            logging.error(" No filter defined.")

        params = element.get("parameters", None)

            params.append("type")

        for job, builds in element["data"].items():
            data[job] = pd.Series()
            for build in builds:
                data[job][str(build)] = pd.Series()

                    data_iter = self.data[job][str(build)][data_set].\

                    if continue_on_error:

                for test_ID, test_data in data_iter:
                    # WARNING: eval() of the filter string -- the filter
                    # comes from the element specification file, not from
                    # untrusted external input.
                    if eval(cond, {"tags": test_data.get("tags", "")}):
                        data[job][str(build)][test_ID] = pd.Series()

                            # params is None: copy every parameter.
                            for param, val in test_data.items():
                                data[job][str(build)][test_ID][param] = val

                            # Otherwise copy only the requested parameters.
                            for param in params:
                                    data[job][str(build)][test_ID][param] =\

                                    data[job][str(build)][test_ID][param] =\

        except (KeyError, IndexError, ValueError) as err:
            logging.error(" Missing mandatory parameter in the element "
                          "specification: {0}".format(err))

        except AttributeError:

            logging.error(" The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
1510 def merge_data(data):
1511 """Merge data from more jobs and builds to a simple data structure.
1513 The output data structure is:
1515 - test (suite) 1 ID:
1521 - test (suite) n ID:
1524 :param data: Data to merge.
1525 :type data: pandas.Series
1526 :returns: Merged data.
1527 :rtype: pandas.Series
1530 logging.info(" Merging data ...")
1532 merged_data = pd.Series()
1533 for _, builds in data.iteritems():
1534 for _, item in builds.iteritems():
1535 for ID, item_data in item.iteritems():
1536 merged_data[ID] = item_data