1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
"""Data pre-processing

- extract data from output.xml files generated by Jenkins jobs and store in
  pandas' Series,
- provide access to the data,
- filter the data using tags.
28 from robot.api import ExecutionResult, ResultVisitor
29 from robot import errors
30 from collections import OrderedDict
31 from string import replace
33 from os.path import join
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
39 from input_data_files import download_and_unzip_data_file
42 # Separator used in file names
46 class ExecutionChecker(ResultVisitor):
47 """Class to traverse through the test suite structure.
49 The functionality implemented in this class generates a json structure:
55 "generated": "Timestamp",
56 "version": "SUT version",
57 "job": "Jenkins job name",
58 "build": "Information about the build"
61 "Suite long name 1": {
63 "doc": "Suite 1 documentation",
64 "parent": "Suite 1 parent",
65 "level": "Level of the suite in the suite hierarchy"
67 "Suite long name N": {
69 "doc": "Suite N documentation",
70 "parent": "Suite 2 parent",
71 "level": "Level of the suite in the suite hierarchy"
78 "parent": "Name of the parent of the test",
79 "doc": "Test documentation",
80 "msg": "Test message",
81 "conf-history": "DUT1 and DUT2 VAT History",
82 "show-run": "Show Run",
83 "tags": ["tag 1", "tag 2", "tag n"],
85 "status": "PASS" | "FAIL",
127 "parent": "Name of the parent of the test",
128 "doc": "Test documentation",
129 "msg": "Test message",
130 "tags": ["tag 1", "tag 2", "tag n"],
132 "status": "PASS" | "FAIL",
139 "parent": "Name of the parent of the test",
140 "doc": "Test documentation",
141 "msg": "Test message",
142 "tags": ["tag 1", "tag 2", "tag n"],
143 "type": "MRR" | "BMRR",
144 "status": "PASS" | "FAIL",
146 "receive-rate": AvgStdevMetadata,
150 # TODO: Remove when definitely no NDRPDRDISC tests are used:
154 "parent": "Name of the parent of the test",
155 "doc": "Test documentation",
156 "msg": "Test message",
157 "tags": ["tag 1", "tag 2", "tag n"],
158 "type": "PDR" | "NDR",
159 "status": "PASS" | "FAIL",
160 "throughput": { # Only type: "PDR" | "NDR"
162 "unit": "pps" | "bps" | "percentage"
164 "latency": { # Only type: "PDR" | "NDR"
171 "50": { # Only for NDR
176 "10": { # Only for NDR
188 "50": { # Only for NDR
193 "10": { # Only for NDR
200 "lossTolerance": "lossTolerance", # Only type: "PDR"
201 "conf-history": "DUT1 and DUT2 VAT History"
202 "show-run": "Show Run"
214 "metadata": { # Optional
215 "version": "VPP version",
216 "job": "Jenkins job name",
217 "build": "Information about the build"
221 "doc": "Suite 1 documentation",
222 "parent": "Suite 1 parent",
223 "level": "Level of the suite in the suite hierarchy"
226 "doc": "Suite N documentation",
227 "parent": "Suite 2 parent",
228 "level": "Level of the suite in the suite hierarchy"
234 "parent": "Name of the parent of the test",
235 "doc": "Test documentation"
236 "msg": "Test message"
237 "tags": ["tag 1", "tag 2", "tag n"],
238 "conf-history": "DUT1 and DUT2 VAT History"
239 "show-run": "Show Run"
240 "status": "PASS" | "FAIL"
248 .. note:: ID is the lowercase full path to the test.
# Precompiled regular expressions used to parse values out of Robot test
# messages.  NOTE(review): this extract is missing some original lines, so a
# few multi-line re.compile(...) calls below appear without their closing
# lines — confirm against the full source.
251 # TODO: Remove when definitely no NDRPDRDISC tests are used:
252 REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
# PLRsearch lower/upper bound.  NOTE(review): '\d+.\d+' uses an unescaped
# dot (matches any character); presumably '\d+\.\d+' was intended — harmless
# for digit/dot input, but worth confirming.  The same applies to the
# NDR/PDR pattern below.
254 REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
255 r'PLRsearch upper bound::\s(\d+.\d+)')
# NDR/PDR lower and upper bounds (groups 1-4); consumed by
# _get_ndrpdr_throughput().
257 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
258 r'NDR_UPPER:\s(\d+.\d+).*\n'
259 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
260 r'PDR_UPPER:\s(\d+.\d+)')
262 # TODO: Remove when definitely no NDRPDRDISC tests are used:
# Three 'min/avg/max' latency pairs for NDR tests (groups 1-6).
263 REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
264 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
265 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
266 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
267 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
268 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
269 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
# One 'min/avg/max' latency pair for PDR tests (groups 1-2).
271 REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
272 r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
273 r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
# Two latency pairs (one per direction); consumed by _get_ndrpdr_latency().
275 REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
276 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
# Loss tolerance of PDR tests.  NOTE(review): the closing line(s) of this
# re.compile(...) call (original lines 279-280) are missing from this extract.
278 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
# SUT version strings; group(2) captures the version text itself.
281 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
282 r"VPP Version:\s*|VPP version:\s*)(.*)")
284 REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
# TCP test result line; group(2) is the measured number.
286 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
# Legacy MRR result: interval length (group 1), tx (2) and rx (3) counts.
288 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
289 r'tx\s(\d*),\srx\s(\d*)')
# BMRR trial results: comma-separated floats inside the brackets (group 1).
291 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
292 r' in packets per second: \[(.*)\]')
# Thread/core tag (e.g. "2T1C") and the two test-name fragment conventions.
294 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
296 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
298 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
# Leading test case number ("tcNN-") stripped from test names in start_test().
300 REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
# NOTE(review): this extract is missing several original lines in this
# method (e.g. the docstring close and the opening lines of the
# 'self._data = {' and parse-dispatch dict literals); comments below only
# describe what is visible.
302 def __init__(self, metadata, mapping, ignore):
305 :param metadata: Key-value pairs to be included in "metadata" part of
307 :param mapping: Mapping of the old names of test cases to the new
309 :param ignore: List of TCs to be ignored.
315 # Type of message to parse out from the test messages
316 self._msg_type = None
# Build generation timestamp, filled in by _get_timestamp().
322 self._timestamp = None
324 # Testbed. The testbed is identified by TG node IP address.
327 # Mapping of TCs long names
328 self._mapping = mapping
# TC long names to be skipped entirely (checked in start_test()).
331 self._ignore = ignore
333 # Number of VAT History messages found:
335 # 1 - VAT History of DUT1
336 # 2 - VAT History of DUT2
337 self._lookup_kw_nr = 0
338 self._conf_history_lookup_nr = 0
340 # Number of Show Running messages found
342 # 1 - Show run message found
343 self._show_run_lookup_nr = 0
345 # Test ID of currently processed test- the lowercase full path to the
# Main output structure with "metadata", "suites" and "tests" sections,
# insertion-ordered.
349 # The main data structure
351 "metadata": OrderedDict(),
352 "suites": OrderedDict(),
353 "tests": OrderedDict()
356 # Save the provided metadata
357 for key, val in metadata.items():
358 self._data["metadata"][key] = val
# Dispatch table mapping self._msg_type values to parser methods; invoked
# from start_message().
360 # Dictionary defining the methods used to parse different types of
363 "timestamp": self._get_timestamp,
364 "vpp-version": self._get_vpp_version,
365 "dpdk-version": self._get_dpdk_version,
366 "teardown-vat-history": self._get_vat_history,
367 "teardown-papi-history": self._get_papi_history,
368 "test-show-runtime": self._get_show_run,
369 "testbed": self._get_testbed
374 """Getter - Data parsed from the XML file.
376 :returns: Data parsed from the XML file.
381 def _get_testbed(self, msg):
382 """Called when extraction of testbed IP is required.
383 The testbed is identified by TG node IP address.
385 :param msg: Message to process.
390 if msg.message.count("Setup of TG node"):
391 reg_tg_ip = re.compile(
392 r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
394 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
395 except (KeyError, ValueError, IndexError, AttributeError):
398 self._data["metadata"]["testbed"] = self._testbed
399 self._msg_type = None
401 def _get_vpp_version(self, msg):
402 """Called when extraction of VPP version is required.
404 :param msg: Message to process.
# Accept any of the three version banners seen in test logs.
409 if msg.message.count("return STDOUT Version:") or \
410 msg.message.count("VPP Version:") or \
411 msg.message.count("VPP version:"):
# NOTE(review): the continuation line with the group(...) call (original
# line 413) is missing from this extract; REGEX_VERSION_VPP captures the
# version text in group 2.
412 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
414 self._data["metadata"]["version"] = self._version
415 self._msg_type = None
417 def _get_dpdk_version(self, msg):
418 """Called when extraction of DPDK version is required.
420 :param msg: Message to process.
425 if msg.message.count("DPDK Version:"):
# group(2) of REGEX_VERSION_DPDK is the version text.
427 self._version = str(re.search(
428 self.REGEX_VERSION_DPDK, msg.message). group(2))
429 self._data["metadata"]["version"] = self._version
# NOTE(review): original lines 430-432 are missing here — presumably the
# except branch of a try started before line 427; confirm against the full
# source.
433 self._msg_type = None
435 def _get_timestamp(self, msg):
436 """Called when extraction of timestamp is required.
438 :param msg: Message to process.
443 self._timestamp = msg.timestamp[:14]
444 self._data["metadata"]["generated"] = self._timestamp
445 self._msg_type = None
# NOTE(review): _get_vat_history and _get_papi_history below are almost
# identical except for the marker string — candidates for one shared helper.
# This extract is missing some original lines (notably the 'else:' lines
# before the 'self._msg_type = None' statements).
447 def _get_vat_history(self, msg):
448 """Called when extraction of VAT command history is required.
450 :param msg: Message to process.
454 if msg.message.count("VAT command history:"):
# One message per DUT; the counter selects the "**DUTn:**" label.
455 self._conf_history_lookup_nr += 1
456 if self._conf_history_lookup_nr == 1:
457 self._data["tests"][self._test_ID]["conf-history"] = str()
459 self._msg_type = None
# Strip the leading "<ip> VAT command history:" header, then normalise
# newlines to the ' |br| ' markup used in the generated reports.
# NOTE(review): the IP part of this sub() pattern also uses unescaped dots.
460 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
461 "VAT command history:", "", msg.message, count=1). \
462 replace("\n\n", "\n").replace('\n', ' |br| ').\
463 replace('\r', '').replace('"', "'")
465 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
466 self._data["tests"][self._test_ID]["conf-history"] += \
467 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
469 def _get_papi_history(self, msg):
470 """Called when extraction of PAPI command history is required.
472 :param msg: Message to process.
476 if msg.message.count("PAPI command history:"):
477 self._conf_history_lookup_nr += 1
478 if self._conf_history_lookup_nr == 1:
479 self._data["tests"][self._test_ID]["conf-history"] = str()
481 self._msg_type = None
482 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
483 "PAPI command history:", "", msg.message, count=1). \
484 replace("\n\n", "\n").replace('\n', ' |br| ').\
485 replace('\r', '').replace('"', "'")
487 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
488 self._data["tests"][self._test_ID]["conf-history"] += \
489 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
491 def _get_show_run(self, msg):
492 """Called when extraction of VPP operational data (output of CLI command
493 Show Runtime) is required.
495 :param msg: Message to process.
499 if msg.message.count("Runtime:"):
500 self._show_run_lookup_nr += 1
501 if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
502 self._data["tests"][self._test_ID]["show-run"] = str()
503 if self._lookup_kw_nr > 1:
504 self._msg_type = None
505 if self._show_run_lookup_nr == 1:
506 message = str(msg.message).replace(' ', '').replace('\n', '').\
507 replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
508 runtime = loads(message)
510 threads_nr = len(runtime[0]["clocks"])
511 except (IndexError, KeyError):
513 tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks"]
514 table = [[tbl_hdr, ] for _ in range(threads_nr)]
516 for idx in range(threads_nr):
520 item["vectors"][idx],
521 item["suspends"][idx],
525 for idx in range(threads_nr):
526 text += "Thread {idx} ".format(idx=idx)
527 text += "vpp_main\n" if idx == 0 else \
528 "vpp_wk_{idx}\n".format(idx=idx-1)
530 for row in table[idx]:
531 if txt_table is None:
532 txt_table = prettytable.PrettyTable(row)
535 txt_table.add_row(row)
536 txt_table.set_style(prettytable.MSWORD_FRIENDLY)
537 txt_table.align["Name"] = "l"
538 txt_table.align["Calls"] = "r"
539 txt_table.align["Vectors"] = "r"
540 txt_table.align["Suspends"] = "r"
541 txt_table.align["Clocks"] = "r"
543 text += txt_table.get_string(sortby="Name") + '\n'
545 text = text.replace('\n', ' |br| ').replace('\r', '').\
548 self._data["tests"][self._test_ID]["show-run"] += " |br| "
549 self._data["tests"][self._test_ID]["show-run"] += \
550 "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
554 # TODO: Remove when definitely no NDRPDRDISC tests are used:
555 def _get_latency(self, msg, test_type):
556 """Get the latency data from the test message.
558 :param msg: Message to be parsed.
559 :param test_type: Type of the test - NDR or PDR.
562 :returns: Latencies parsed from the message.
566 if test_type == "NDR":
567 groups = re.search(self.REGEX_LAT_NDR, msg)
568 groups_range = range(1, 7)
569 elif test_type == "PDR":
570 groups = re.search(self.REGEX_LAT_PDR, msg)
571 groups_range = range(1, 3)
576 for idx in groups_range:
578 lat = [int(item) for item in str(groups.group(idx)).split('/')]
579 except (AttributeError, ValueError):
581 latencies.append(lat)
583 keys = ("min", "avg", "max")
591 latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
592 latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
593 if test_type == "NDR":
594 latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
595 latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
596 latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
597 latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
601 def _get_ndrpdr_throughput(self, msg):
602 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
605 :param msg: The test message to be parsed.
607 :returns: Parsed data as a dict and the status (PASS/FAIL).
608 :rtype: tuple(dict, str)
612 "NDR": {"LOWER": -1.0, "UPPER": -1.0},
613 "PDR": {"LOWER": -1.0, "UPPER": -1.0}
616 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
618 if groups is not None:
620 throughput["NDR"]["LOWER"] = float(groups.group(1))
621 throughput["NDR"]["UPPER"] = float(groups.group(2))
622 throughput["PDR"]["LOWER"] = float(groups.group(3))
623 throughput["PDR"]["UPPER"] = float(groups.group(4))
625 except (IndexError, ValueError):
628 return throughput, status
630 def _get_plr_throughput(self, msg):
631 """Get PLRsearch lower bound and PLRsearch upper bound from the test
634 :param msg: The test message to be parsed.
636 :returns: Parsed data as a dict and the status (PASS/FAIL).
637 :rtype: tuple(dict, str)
645 groups = re.search(self.REGEX_PLR_RATE, msg)
647 if groups is not None:
649 throughput["LOWER"] = float(groups.group(1))
650 throughput["UPPER"] = float(groups.group(2))
652 except (IndexError, ValueError):
655 return throughput, status
657 def _get_ndrpdr_latency(self, msg):
658 """Get LATENCY from the test message.
660 :param msg: The test message to be parsed.
662 :returns: Parsed data as a dict and the status (PASS/FAIL).
663 :rtype: tuple(dict, str)
668 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
669 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
672 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
673 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
677 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
679 if groups is not None:
680 keys = ("min", "avg", "max")
682 latency["NDR"]["direction1"] = dict(
683 zip(keys, [float(l) for l in groups.group(1).split('/')]))
684 latency["NDR"]["direction2"] = dict(
685 zip(keys, [float(l) for l in groups.group(2).split('/')]))
686 latency["PDR"]["direction1"] = dict(
687 zip(keys, [float(l) for l in groups.group(3).split('/')]))
688 latency["PDR"]["direction2"] = dict(
689 zip(keys, [float(l) for l in groups.group(4).split('/')]))
691 except (IndexError, ValueError):
694 return latency, status
696 def visit_suite(self, suite):
697 """Implements traversing through the suite and its direct children.
699 :param suite: Suite to process.
703 if self.start_suite(suite) is not False:
704 suite.suites.visit(self)
705 suite.tests.visit(self)
706 self.end_suite(suite)
708 def start_suite(self, suite):
709 """Called when suite starts.
711 :param suite: Suite to process.
717 parent_name = suite.parent.name
718 except AttributeError:
721 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
722 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
723 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
725 self._data["suites"][suite.longname.lower().replace('"', "'").
726 replace(" ", "_")] = {
727 "name": suite.name.lower(),
729 "parent": parent_name,
730 "level": len(suite.longname.split("."))
733 suite.keywords.visit(self)
735 def end_suite(self, suite):
736 """Called when suite ends.
738 :param suite: Suite to process.
744 def visit_test(self, test):
745 """Implements traversing through the test.
747 :param test: Test to process.
751 if self.start_test(test) is not False:
752 test.keywords.visit(self)
755 def start_test(self, test):
756 """Called when test starts.
758 :param test: Test to process.
763 longname_orig = test.longname.lower()
765 # Check the ignore list
766 if longname_orig in self._ignore:
769 tags = [str(tag) for tag in test.tags]
772 # Change the TC long name and name if defined in the mapping table
773 longname = self._mapping.get(longname_orig, None)
774 if longname is not None:
775 name = longname.split('.')[-1]
776 logging.debug("{0}\n{1}\n{2}\n{3}".format(
777 self._data["metadata"], longname_orig, longname, name))
779 longname = longname_orig
780 name = test.name.lower()
782 # Remove TC number from the TC long name (backward compatibility):
783 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
784 # Remove TC number from the TC name (not needed):
785 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
787 test_result["parent"] = test.parent.name.lower()
788 test_result["tags"] = tags
789 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
790 replace('\r', '').replace('[', ' |br| [')
791 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
792 test_result["msg"] = test.message.replace('\n', ' |br| '). \
793 replace('\r', '').replace('"', "'")
794 test_result["type"] = "FUNC"
795 test_result["status"] = test.status
797 if "PERFTEST" in tags:
798 # Replace info about cores (e.g. -1c-) with the info about threads
799 # and cores (e.g. -1t1c-) in the long test case names and in the
800 # test case names if necessary.
801 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
804 for tag in test_result["tags"]:
805 groups = re.search(self.REGEX_TC_TAG, tag)
811 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
812 "-{0}-".format(tag_tc.lower()),
815 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
816 "-{0}-".format(tag_tc.lower()),
820 test_result["status"] = "FAIL"
821 self._data["tests"][self._test_ID] = test_result
822 logging.debug("The test '{0}' has no or more than one "
823 "multi-threading tags.".format(self._test_ID))
824 logging.debug("Tags: {0}".format(test_result["tags"]))
827 if test.status == "PASS" and ("NDRPDRDISC" in tags or
833 # TODO: Remove when definitely no NDRPDRDISC tests are used:
834 if "NDRDISC" in tags:
835 test_result["type"] = "NDR"
836 # TODO: Remove when definitely no NDRPDRDISC tests are used:
837 elif "PDRDISC" in tags:
838 test_result["type"] = "PDR"
839 elif "NDRPDR" in tags:
840 test_result["type"] = "NDRPDR"
842 test_result["type"] = "SOAK"
844 test_result["type"] = "TCP"
846 test_result["type"] = "MRR"
847 elif "FRMOBL" in tags or "BMRR" in tags:
848 test_result["type"] = "BMRR"
850 test_result["status"] = "FAIL"
851 self._data["tests"][self._test_ID] = test_result
854 # TODO: Remove when definitely no NDRPDRDISC tests are used:
855 if test_result["type"] in ("NDR", "PDR"):
857 rate_value = str(re.search(
858 self.REGEX_RATE, test.message).group(1))
859 except AttributeError:
862 rate_unit = str(re.search(
863 self.REGEX_RATE, test.message).group(2))
864 except AttributeError:
867 test_result["throughput"] = dict()
868 test_result["throughput"]["value"] = \
869 int(rate_value.split('.')[0])
870 test_result["throughput"]["unit"] = rate_unit
871 test_result["latency"] = \
872 self._get_latency(test.message, test_result["type"])
873 if test_result["type"] == "PDR":
874 test_result["lossTolerance"] = str(re.search(
875 self.REGEX_TOLERANCE, test.message).group(1))
877 elif test_result["type"] in ("NDRPDR", ):
878 test_result["throughput"], test_result["status"] = \
879 self._get_ndrpdr_throughput(test.message)
880 test_result["latency"], test_result["status"] = \
881 self._get_ndrpdr_latency(test.message)
883 elif test_result["type"] in ("SOAK", ):
884 test_result["throughput"], test_result["status"] = \
885 self._get_plr_throughput(test.message)
887 elif test_result["type"] in ("TCP", ):
888 groups = re.search(self.REGEX_TCP, test.message)
889 test_result["result"] = int(groups.group(2))
891 elif test_result["type"] in ("MRR", "BMRR"):
892 test_result["result"] = dict()
893 groups = re.search(self.REGEX_BMRR, test.message)
894 if groups is not None:
895 items_str = groups.group(1)
896 items_float = [float(item.strip()) for item
897 in items_str.split(",")]
898 metadata = AvgStdevMetadataFactory.from_data(items_float)
899 # Next two lines have been introduced in CSIT-1179,
900 # to be removed in CSIT-1180.
903 test_result["result"]["receive-rate"] = metadata
905 groups = re.search(self.REGEX_MRR, test.message)
906 test_result["result"]["receive-rate"] = \
907 AvgStdevMetadataFactory.from_data([
908 float(groups.group(3)) / float(groups.group(1)), ])
910 self._data["tests"][self._test_ID] = test_result
912 def end_test(self, test):
913 """Called when test ends.
915 :param test: Test to process.
921 def visit_keyword(self, keyword):
922 """Implements traversing through the keyword and its child keywords.
924 :param keyword: Keyword to process.
925 :type keyword: Keyword
928 if self.start_keyword(keyword) is not False:
929 self.end_keyword(keyword)
931 def start_keyword(self, keyword):
932 """Called when keyword starts. Default implementation does nothing.
934 :param keyword: Keyword to process.
935 :type keyword: Keyword
939 if keyword.type == "setup":
940 self.visit_setup_kw(keyword)
941 elif keyword.type == "teardown":
942 self._lookup_kw_nr = 0
943 self.visit_teardown_kw(keyword)
945 self._lookup_kw_nr = 0
946 self.visit_test_kw(keyword)
947 except AttributeError:
950 def end_keyword(self, keyword):
951 """Called when keyword ends. Default implementation does nothing.
953 :param keyword: Keyword to process.
954 :type keyword: Keyword
959 def visit_test_kw(self, test_kw):
960 """Implements traversing through the test keyword and its child
963 :param test_kw: Keyword to process.
964 :type test_kw: Keyword
967 for keyword in test_kw.keywords:
968 if self.start_test_kw(keyword) is not False:
969 self.visit_test_kw(keyword)
970 self.end_test_kw(keyword)
972 def start_test_kw(self, test_kw):
973 """Called when test keyword starts. Default implementation does
976 :param test_kw: Keyword to process.
977 :type test_kw: Keyword
980 if test_kw.name.count("Show Runtime Counters On All Duts"):
981 self._lookup_kw_nr += 1
982 self._show_run_lookup_nr = 0
983 self._msg_type = "test-show-runtime"
984 elif test_kw.name.count("Install Dpdk Test") and not self._version:
985 self._msg_type = "dpdk-version"
988 test_kw.messages.visit(self)
990 def end_test_kw(self, test_kw):
991 """Called when keyword ends. Default implementation does nothing.
993 :param test_kw: Keyword to process.
994 :type test_kw: Keyword
999 def visit_setup_kw(self, setup_kw):
1000 """Implements traversing through the teardown keyword and its child
1003 :param setup_kw: Keyword to process.
1004 :type setup_kw: Keyword
1007 for keyword in setup_kw.keywords:
1008 if self.start_setup_kw(keyword) is not False:
1009 self.visit_setup_kw(keyword)
1010 self.end_setup_kw(keyword)
1012 def start_setup_kw(self, setup_kw):
1013 """Called when teardown keyword starts. Default implementation does
1016 :param setup_kw: Keyword to process.
1017 :type setup_kw: Keyword
1020 if setup_kw.name.count("Show Vpp Version On All Duts") \
1021 and not self._version:
1022 self._msg_type = "vpp-version"
1023 elif setup_kw.name.count("Set Global Variable") \
1024 and not self._timestamp:
1025 self._msg_type = "timestamp"
1026 elif setup_kw.name.count("Setup Framework") and not self._testbed:
1027 self._msg_type = "testbed"
1030 setup_kw.messages.visit(self)
1032 def end_setup_kw(self, setup_kw):
1033 """Called when keyword ends. Default implementation does nothing.
1035 :param setup_kw: Keyword to process.
1036 :type setup_kw: Keyword
1041 def visit_teardown_kw(self, teardown_kw):
1042 """Implements traversing through the teardown keyword and its child
1045 :param teardown_kw: Keyword to process.
1046 :type teardown_kw: Keyword
1049 for keyword in teardown_kw.keywords:
1050 if self.start_teardown_kw(keyword) is not False:
1051 self.visit_teardown_kw(keyword)
1052 self.end_teardown_kw(keyword)
1054 def start_teardown_kw(self, teardown_kw):
1055 """Called when teardown keyword starts. Default implementation does
1058 :param teardown_kw: Keyword to process.
1059 :type teardown_kw: Keyword
1063 if teardown_kw.name.count("Show Vat History On All Duts"):
1064 self._conf_history_lookup_nr = 0
1065 self._msg_type = "teardown-vat-history"
1066 teardown_kw.messages.visit(self)
1067 elif teardown_kw.name.count("Show Papi History On All Duts"):
1068 self._conf_history_lookup_nr = 0
1069 self._msg_type = "teardown-papi-history"
1070 teardown_kw.messages.visit(self)
1072 def end_teardown_kw(self, teardown_kw):
1073 """Called when keyword ends. Default implementation does nothing.
1075 :param teardown_kw: Keyword to process.
1076 :type teardown_kw: Keyword
1081 def visit_message(self, msg):
1082 """Implements visiting the message.
1084 :param msg: Message to process.
1088 if self.start_message(msg) is not False:
1089 self.end_message(msg)
1091 def start_message(self, msg):
1092 """Called when message starts. Get required information from messages:
1095 :param msg: Message to process.
1101 self.parse_msg[self._msg_type](msg)
1103 def end_message(self, msg):
1104 """Called when message ends. Default implementation does nothing.
1106 :param msg: Message to process.
1113 class InputData(object):
1116 The data is extracted from output.xml files generated by Jenkins jobs and
1117 stored in pandas' DataFrames.
1123 (as described in ExecutionChecker documentation)
1125 (as described in ExecutionChecker documentation)
1127 (as described in ExecutionChecker documentation)
1130 def __init__(self, spec):
1133 :param spec: Specification.
1134 :type spec: Specification
1141 self._input_data = pd.Series()
1145 """Getter - Input data.
1147 :returns: Input data
1148 :rtype: pandas.Series
1150 return self._input_data
1152 def metadata(self, job, build):
1153 """Getter - metadata
1155 :param job: Job which metadata we want.
1156 :param build: Build which metadata we want.
1160 :rtype: pandas.Series
1163 return self.data[job][build]["metadata"]
1165 def suites(self, job, build):
1168 :param job: Job which suites we want.
1169 :param build: Build which suites we want.
1173 :rtype: pandas.Series
1176 return self.data[job][str(build)]["suites"]
1178 def tests(self, job, build):
1181 :param job: Job which tests we want.
1182 :param build: Build which tests we want.
1186 :rtype: pandas.Series
1189 return self.data[job][build]["tests"]
1191 def _parse_tests(self, job, build, log):
1192 """Process data from robot output.xml file and return JSON structured
1195 :param job: The name of job which build output data will be processed.
1196 :param build: The build which output data will be processed.
1197 :param log: List of log messages.
1200 :type log: list of tuples (severity, msg)
1201 :returns: JSON data structure.
1210 with open(build["file-name"], 'r') as data_file:
1212 result = ExecutionResult(data_file)
1213 except errors.DataError as err:
1214 log.append(("ERROR", "Error occurred while parsing output.xml: "
1217 checker = ExecutionChecker(metadata, self._cfg.mapping,
1219 result.visit(checker)
1223 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1224 """Download and parse the input data file.
1226 :param pid: PID of the process executing this method.
1227 :param job: Name of the Jenkins job which generated the processed input
1229 :param build: Information about the Jenkins build which generated the
1230 processed input file.
1231 :param repeat: Repeat the download specified number of times if not
1241 logs.append(("INFO", " Processing the job/build: {0}: {1}".
1242 format(job, build["build"])))
1249 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1255 logs.append(("ERROR", "It is not possible to download the input "
1256 "data file from the job '{job}', build "
1257 "'{build}', or it is damaged. Skipped.".
1258 format(job=job, build=build["build"])))
1260 logs.append(("INFO", " Processing data from the build '{0}' ...".
1261 format(build["build"])))
1262 data = self._parse_tests(job, build, logs)
1264 logs.append(("ERROR", "Input data file from the job '{job}', "
1265 "build '{build}' is damaged. Skipped.".
1266 format(job=job, build=build["build"])))
1271 remove(build["file-name"])
1272 except OSError as err:
1273 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1274 format(build["file-name"], repr(err))))
1276 # If the time-period is defined in the specification file, remove all
1277 # files which are outside the time period.
1278 timeperiod = self._cfg.input.get("time-period", None)
1279 if timeperiod and data:
1281 timeperiod = timedelta(int(timeperiod))
1282 metadata = data.get("metadata", None)
1284 generated = metadata.get("generated", None)
1286 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1287 if (now - generated) > timeperiod:
1288 # Remove the data and the file:
1293 " The build {job}/{build} is outdated, will be "
1294 "removed".format(job=job, build=build["build"])))
1295 file_name = self._cfg.input["file-name"]
1297 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
1298 "{job}{sep}{build}{sep}{name}".format(
1301 build=build["build"],
1305 logs.append(("INFO",
1306 " The file {name} has been removed".
1307 format(name=full_name)))
1308 except OSError as err:
1309 logs.append(("ERROR",
1310 "Cannot remove the file '{0}': {1}".
1311 format(full_name, repr(err))))
1312 logs.append(("INFO", " Done."))
1314 for level, line in logs:
1317 elif level == "ERROR":
1319 elif level == "DEBUG":
1321 elif level == "CRITICAL":
1322 logging.critical(line)
1323 elif level == "WARNING":
1324 logging.warning(line)
1326 return {"data": data, "state": state, "job": job, "build": build}
1328 def download_and_parse_data(self, repeat=1):
1329 """Download the input data files, parse input data from input files and
1330 store in pandas' Series.
1332 :param repeat: Repeat the download specified number of times if not
1337 logging.info("Downloading and parsing input files ...")
1339 for job, builds in self._cfg.builds.items():
1340 for build in builds:
1342 result = self._download_and_parse_build(job, build, repeat)
1343 build_nr = result["build"]["build"]
1346 data = result["data"]
1347 build_data = pd.Series({
1348 "metadata": pd.Series(
1349 data["metadata"].values(),
1350 index=data["metadata"].keys()),
1351 "suites": pd.Series(data["suites"].values(),
1352 index=data["suites"].keys()),
1353 "tests": pd.Series(data["tests"].values(),
1354 index=data["tests"].keys())})
1356 if self._input_data.get(job, None) is None:
1357 self._input_data[job] = pd.Series()
1358 self._input_data[job][str(build_nr)] = build_data
1360 self._cfg.set_input_file_name(
1361 job, build_nr, result["build"]["file-name"])
1363 self._cfg.set_input_state(job, build_nr, result["state"])
1365 logging.info("Memory allocation: {0:,d}MB".format(
1366 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
1368 logging.info("Done.")
1371 def _end_of_tag(tag_filter, start=0, closer="'"):
1372 """Return the index of character in the string which is the end of tag.
1374 :param tag_filter: The string where the end of tag is being searched.
1375 :param start: The index where the searching is stated.
1376 :param closer: The character which is the tag closer.
1377 :type tag_filter: str
1380 :returns: The index of the tag closer.
1385 idx_opener = tag_filter.index(closer, start)
1386 return tag_filter.index(closer, idx_opener + 1)
# Turn a tag filter such as "'TAG_A' and 'TAG_B'" into a Python expression
# like "'TAG_A' in tags and 'TAG_B' in tags", later executed by
# filter_data() via eval() with a {"tags": ...} namespace.
# NOTE(review): gapped extract — the loop scaffold around lines 1402/1406
# (loop header, index initialisation/advance, termination, return) is elided.
1391 def _condition(tag_filter):
1392 """Create a conditional statement from the given tag filter.
1394 :param tag_filter: Filter based on tags from the element specification.
1395 :type tag_filter: str
1396 :returns: Conditional statement which can be evaluated.
# Find the closing apostrophe of the next quoted tag ...
1402 index = InputData._end_of_tag(tag_filter, index)
# ... and splice " in tags" right after it.
1406 tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
# Select tests (or suites) from self.data by a tag-based filter expression
# and copy the requested parameters into a new job -> build -> test_ID
# pandas.Series structure.
# NOTE(review): gapped extract — control-flow lines are elided (e.g. the
# else-branches near 1445/1451, the try header before 1489, and whatever
# follows 1482/1485), so the visible lines do not show the full logic.
1408 def filter_data(self, element, params=None, data_set="tests",
1409 continue_on_error=False):
1410 """Filter required data from the given jobs and builds.
1412 The output data structure is:
1416 - test (or suite) 1 ID:
1422 - test (or suite) n ID:
1429 :param element: Element which will use the filtered data.
1430 :param params: Parameters which will be included in the output. If None,
1431 all parameters are included.
1432 :param data_set: The set of data to be filtered: tests, suites,
1434 :param continue_on_error: Continue if there is error while reading the
1435 data. The Item will be empty then
1436 :type element: pandas.Series
1439 :type continue_on_error: bool
1440 :returns: Filtered data.
1441 :rtype pandas.Series
# "all" / "template" are special filter values; otherwise the filter
# string is compiled to an eval-able condition by _condition().
1445 if element["filter"] in ("all", "template"):
1448 cond = InputData._condition(element["filter"])
1449 logging.debug(" Filter: {0}".format(cond))
1451 logging.error(" No filter defined.")
# Parameters to copy come from the element spec; "type" is always kept.
1455 params = element.get("parameters", None)
1457 params.append("type")
# Build the nested output Series: job -> build -> test_ID -> params.
1461 for job, builds in element["data"].items():
1462 data[job] = pd.Series()
1463 for build in builds:
1464 data[job][str(build)] = pd.Series()
1466 data_iter = self.data[job][str(build)][data_set].\
1469 if continue_on_error:
1473 for test_ID, test_data in data_iter:
# NOTE(review): eval() on the generated condition — acceptable only
# because the filter comes from the element specification, not from
# untrusted input. "tags" defaults to "" when the test has none.
1474 if eval(cond, {"tags": test_data.get("tags", "")}):
1475 data[job][str(build)][test_ID] = pd.Series()
# With params unset: copy every parameter of the matching test.
1477 for param, val in test_data.items():
1478 data[job][str(build)][test_ID][param] = val
# With params set: copy only the listed parameters (the copy /
# fallback expressions continue on elided lines 1483/1486).
1480 for param in params:
1482 data[job][str(build)][test_ID][param] =\
1485 data[job][str(build)][test_ID][param] =\
1489 except (KeyError, IndexError, ValueError) as err:
1490 logging.error(" Missing mandatory parameter in the element "
1491 "specification: {0}".format(err))
# AttributeError here indicates the eval'd filter itself was malformed.
1493 except AttributeError:
1496 logging.error(" The filter '{0}' is not correct. Check if all "
1497 "tags are enclosed by apostrophes.".format(cond))
# Like filter_data(), but selects tests by matching their IDs against the
# regular expressions listed in element["include"] instead of by tag filter.
# NOTE(review): gapped extract — try headers, else-branches and line
# continuations (e.g. after 1555, 1558, 1563, 1571, 1575) are elided.
1500 def filter_tests_by_name(self, element, params=None, data_set="tests",
1501 continue_on_error=False):
1502 """Filter required data from the given jobs and builds.
1504 The output data structure is:
1508 - test (or suite) 1 ID:
1514 - test (or suite) n ID:
1521 :param element: Element which will use the filtered data.
1522 :param params: Parameters which will be included in the output. If None,
1523 all parameters are included.
1524 :param data_set: The set of data to be filtered: tests, suites,
1526 :param continue_on_error: Continue if there is error while reading the
1527 data. The Item will be empty then
1528 :type element: pandas.Series
1531 :type continue_on_error: bool
1532 :returns: Filtered data.
1533 :rtype pandas.Series
# Nothing to do when the element lists no test-name patterns.
1536 include = element.get("include", None)
1538 logging.warning("No tests to include, skipping the element.")
# Parameters to copy come from the element spec; "type" is always kept.
1542 params = element.get("parameters", None)
1544 params.append("type")
# Build the nested output Series: job -> build -> test_ID -> params.
1548 for job, builds in element["data"].items():
1549 data[job] = pd.Series()
1550 for build in builds:
1551 data[job][str(build)] = pd.Series()
1552 for test in include:
# Each include entry is treated as a case-insensitive regex,
# matched (anchored at the start, per re.match) against the IDs
# in the chosen data set.
1554 reg_ex = re.compile(str(test).lower())
1555 for test_ID in self.data[job][str(build)]\
1557 if re.match(reg_ex, str(test_ID).lower()):
1558 test_data = self.data[job][str(build)]\
1560 data[job][str(build)][test_ID] = pd.Series()
# With params unset: copy every parameter of the match;
# otherwise copy only the listed parameters (continuation
# lines are elided).
1562 for param, val in test_data.items():
1563 data[job][str(build)][test_ID]\
1566 for param in params:
1568 data[job][str(build)][test_ID]\
1569 [param] = test_data[param]
1571 data[job][str(build)][test_ID]\
# A missing per-test parameter is logged; continue_on_error
# decides whether to keep going (continuation elided).
1573 except KeyError as err:
1574 logging.error("{err!r}".format(err=err))
1575 if continue_on_error:
1581 except (KeyError, IndexError, ValueError) as err:
1582 logging.error("Missing mandatory parameter in the element "
1583 "specification: {err!r}".format(err=err))
1585 except AttributeError as err:
1586 logging.error("{err!r}".format(err=err))
1591 def merge_data(data):
1592 """Merge data from more jobs and builds to a simple data structure.
1594 The output data structure is:
1596 - test (suite) 1 ID:
1602 - test (suite) n ID:
1605 :param data: Data to merge.
1606 :type data: pandas.Series
1607 :returns: Merged data.
1608 :rtype: pandas.Series
1611 logging.info(" Merging data ...")
1613 merged_data = pd.Series()
1614 for _, builds in data.iteritems():
1615 for _, item in builds.iteritems():
1616 for ID, item_data in item.iteritems():
1617 merged_data[ID] = item_data