1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
28 from robot.api import ExecutionResult, ResultVisitor
29 from robot import errors
30 from collections import OrderedDict
31 from string import replace
33 from os.path import join
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
39 from input_data_files import download_and_unzip_data_file
42 # Separator used in file names
46 class ExecutionChecker(ResultVisitor):
47 """Class to traverse through the test suite structure.
49 The functionality implemented in this class generates a json structure:
55 "generated": "Timestamp",
56 "version": "SUT version",
57 "job": "Jenkins job name",
58 "build": "Information about the build"
61 "Suite long name 1": {
63 "doc": "Suite 1 documentation",
64 "parent": "Suite 1 parent",
65 "level": "Level of the suite in the suite hierarchy"
67 "Suite long name N": {
69 "doc": "Suite N documentation",
70 "parent": "Suite N parent",
71 "level": "Level of the suite in the suite hierarchy"
78 "parent": "Name of the parent of the test",
79 "doc": "Test documentation",
80 "msg": "Test message",
81 "conf-history": "DUT1 and DUT2 VAT History",
82 "show-run": "Show Run",
83 "tags": ["tag 1", "tag 2", "tag n"],
85 "status": "PASS" | "FAIL",
127 "parent": "Name of the parent of the test",
128 "doc": "Test documentation",
129 "msg": "Test message",
130 "tags": ["tag 1", "tag 2", "tag n"],
132 "status": "PASS" | "FAIL",
139 "parent": "Name of the parent of the test",
140 "doc": "Test documentation",
141 "msg": "Test message",
142 "tags": ["tag 1", "tag 2", "tag n"],
143 "type": "MRR" | "BMRR",
144 "status": "PASS" | "FAIL",
146 "receive-rate": AvgStdevMetadata,
150 # TODO: Remove when definitely no NDRPDRDISC tests are used:
154 "parent": "Name of the parent of the test",
155 "doc": "Test documentation",
156 "msg": "Test message",
157 "tags": ["tag 1", "tag 2", "tag n"],
158 "type": "PDR" | "NDR",
159 "status": "PASS" | "FAIL",
160 "throughput": { # Only type: "PDR" | "NDR"
162 "unit": "pps" | "bps" | "percentage"
164 "latency": { # Only type: "PDR" | "NDR"
171 "50": { # Only for NDR
176 "10": { # Only for NDR
188 "50": { # Only for NDR
193 "10": { # Only for NDR
200 "lossTolerance": "lossTolerance", # Only type: "PDR"
201 "conf-history": "DUT1 and DUT2 VAT History"
202 "show-run": "Show Run"
214 "metadata": { # Optional
215 "version": "VPP version",
216 "job": "Jenkins job name",
217 "build": "Information about the build"
221 "doc": "Suite 1 documentation",
222 "parent": "Suite 1 parent",
223 "level": "Level of the suite in the suite hierarchy"
226 "doc": "Suite N documentation",
227 "parent": "Suite N parent",
228 "level": "Level of the suite in the suite hierarchy"
234 "parent": "Name of the parent of the test",
235 "doc": "Test documentation"
236 "msg": "Test message"
237 "tags": ["tag 1", "tag 2", "tag n"],
238 "conf-history": "DUT1 and DUT2 VAT History"
239 "show-run": "Show Run"
240 "status": "PASS" | "FAIL"
248 .. note:: ID is the lowercase full path to the test.
251 # TODO: Remove when definitely no NDRPDRDISC tests are used:
252 REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
254 REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::\s(\d+.\d+).*\n'
255 r'PLRsearch upper bound::\s(\d+.\d+)')
257 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
258 r'NDR_UPPER:\s(\d+.\d+).*\n'
259 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
260 r'PDR_UPPER:\s(\d+.\d+)')
262 # TODO: Remove when definitely no NDRPDRDISC tests are used:
263 REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
264 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
265 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
266 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
267 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
268 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
269 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
271 REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
272 r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
273 r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
275 REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
276 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
278 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
281 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
282 r"VPP Version:\s*|VPP version:\s*)(.*)")
284 REGEX_VERSION_DPDK = re.compile(r"DPDK Version: (\d*.\d*)")
286 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
288 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
289 r'tx\s(\d*),\srx\s(\d*)')
291 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
292 r' in packets per second: \[(.*)\]')
294 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
296 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
298 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
300 REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
302 def __init__(self, metadata, mapping, ignore):
305 :param metadata: Key-value pairs to be included in "metadata" part of
307 :param mapping: Mapping of the old names of test cases to the new
309 :param ignore: List of TCs to be ignored.
315 # Type of message to parse out from the test messages
316 self._msg_type = None
322 self._timestamp = None
324 # Testbed. The testbed is identified by TG node IP address.
327 # Mapping of TCs long names
328 self._mapping = mapping
331 self._ignore = ignore
333 # Number of VAT History messages found:
335 # 1 - VAT History of DUT1
336 # 2 - VAT History of DUT2
337 self._lookup_kw_nr = 0
338 self._conf_history_lookup_nr = 0
340 # Number of Show Running messages found
342 # 1 - Show run message found
343 self._show_run_lookup_nr = 0
345 # Test ID of currently processed test- the lowercase full path to the
349 # The main data structure
351 "metadata": OrderedDict(),
352 "suites": OrderedDict(),
353 "tests": OrderedDict()
356 # Save the provided metadata
357 for key, val in metadata.items():
358 self._data["metadata"][key] = val
360 # Dictionary defining the methods used to parse different types of
363 "timestamp": self._get_timestamp,
364 "vpp-version": self._get_vpp_version,
365 "dpdk-version": self._get_dpdk_version,
366 "teardown-vat-history": self._get_vat_history,
367 "teardown-papi-history": self._get_papi_history,
368 "test-show-runtime": self._get_show_run,
369 "testbed": self._get_testbed
374 """Getter - Data parsed from the XML file.
376 :returns: Data parsed from the XML file.
381 def _get_testbed(self, msg):
382 """Called when extraction of testbed IP is required.
383 The testbed is identified by TG node IP address.
385 :param msg: Message to process.
390 if msg.message.count("Setup of TG node"):
391 reg_tg_ip = re.compile(
392 r'Setup of TG node (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}) done')
394 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
395 except (KeyError, ValueError, IndexError, AttributeError):
398 self._data["metadata"]["testbed"] = self._testbed
399 self._msg_type = None
401 def _get_vpp_version(self, msg):
402 """Called when extraction of VPP version is required.
404 :param msg: Message to process.
409 if msg.message.count("return STDOUT Version:") or \
410 msg.message.count("VPP Version:") or \
411 msg.message.count("VPP version:"):
412 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
414 self._data["metadata"]["version"] = self._version
415 self._msg_type = None
417 def _get_dpdk_version(self, msg):
418 """Called when extraction of DPDK version is required.
420 :param msg: Message to process.
425 if msg.message.count("DPDK Version:"):
427 self._version = str(re.search(
428 self.REGEX_VERSION_DPDK, msg.message). group(1))
429 self._data["metadata"]["version"] = self._version
433 self._msg_type = None
435 def _get_timestamp(self, msg):
436 """Called when extraction of timestamp is required.
438 :param msg: Message to process.
443 self._timestamp = msg.timestamp[:14]
444 self._data["metadata"]["generated"] = self._timestamp
445 self._msg_type = None
447 def _get_vat_history(self, msg):
448 """Called when extraction of VAT command history is required.
450 :param msg: Message to process.
454 if msg.message.count("VAT command history:"):
455 self._conf_history_lookup_nr += 1
456 if self._conf_history_lookup_nr == 1:
457 self._data["tests"][self._test_ID]["conf-history"] = str()
459 self._msg_type = None
460 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
461 "VAT command history:", "", msg.message, count=1). \
462 replace("\n\n", "\n").replace('\n', ' |br| ').\
463 replace('\r', '').replace('"', "'")
465 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
466 self._data["tests"][self._test_ID]["conf-history"] += \
467 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
469 def _get_papi_history(self, msg):
470 """Called when extraction of PAPI command history is required.
472 :param msg: Message to process.
476 if msg.message.count("PAPI command history:"):
477 self._conf_history_lookup_nr += 1
478 if self._conf_history_lookup_nr == 1:
479 self._data["tests"][self._test_ID]["conf-history"] = str()
481 self._msg_type = None
482 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
483 "PAPI command history:", "", msg.message, count=1). \
484 replace("\n\n", "\n").replace('\n', ' |br| ').\
485 replace('\r', '').replace('"', "'")
487 self._data["tests"][self._test_ID]["conf-history"] += " |br| "
488 self._data["tests"][self._test_ID]["conf-history"] += \
489 "**DUT" + str(self._conf_history_lookup_nr) + ":** " + text
491 def _get_show_run(self, msg):
492 """Called when extraction of VPP operational data (output of CLI command
493 Show Runtime) is required.
495 :param msg: Message to process.
499 if msg.message.count("Runtime:"):
500 self._show_run_lookup_nr += 1
501 if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
502 self._data["tests"][self._test_ID]["show-run"] = str()
503 if self._lookup_kw_nr > 1:
504 self._msg_type = None
505 if self._show_run_lookup_nr == 1:
506 message = str(msg.message).replace(' ', '').replace('\n', '').\
507 replace("'", '"').replace('b"', '"').replace('u"', '"')[8:]
508 runtime = loads(message)
510 threads_nr = len(runtime[0]["clocks"])
511 except (IndexError, KeyError):
513 tbl_hdr = ["Name", "Calls", "Vectors", "Suspends", "Clocks"]
514 table = [[tbl_hdr, ] for _ in range(threads_nr)]
516 for idx in range(threads_nr):
520 item["vectors"][idx],
521 item["suspends"][idx],
525 for idx in range(threads_nr):
526 text += "Thread {idx} ".format(idx=idx)
527 text += "vpp_main\n" if idx == 0 else \
528 "vpp_wk_{idx}\n".format(idx=idx-1)
530 for row in table[idx]:
531 if txt_table is None:
532 txt_table = prettytable.PrettyTable(row)
535 txt_table.add_row(row)
536 txt_table.align["Name"] = "l"
537 txt_table.align["Calls"] = "r"
538 txt_table.align["Vectors"] = "r"
539 txt_table.align["Suspends"] = "r"
540 txt_table.align["Clocks"] = "r"
542 text += txt_table.get_string(sortby="Name") + '\n'
544 text = text.replace('\n', ' |br| ').replace('\r', '').\
547 self._data["tests"][self._test_ID]["show-run"] += " |br| "
548 self._data["tests"][self._test_ID]["show-run"] += \
549 "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
553 # TODO: Remove when definitely no NDRPDRDISC tests are used:
554 def _get_latency(self, msg, test_type):
555 """Get the latency data from the test message.
557 :param msg: Message to be parsed.
558 :param test_type: Type of the test - NDR or PDR.
561 :returns: Latencies parsed from the message.
565 if test_type == "NDR":
566 groups = re.search(self.REGEX_LAT_NDR, msg)
567 groups_range = range(1, 7)
568 elif test_type == "PDR":
569 groups = re.search(self.REGEX_LAT_PDR, msg)
570 groups_range = range(1, 3)
575 for idx in groups_range:
577 lat = [int(item) for item in str(groups.group(idx)).split('/')]
578 except (AttributeError, ValueError):
580 latencies.append(lat)
582 keys = ("min", "avg", "max")
590 latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
591 latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
592 if test_type == "NDR":
593 latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
594 latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
595 latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
596 latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
600 def _get_ndrpdr_throughput(self, msg):
601 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
604 :param msg: The test message to be parsed.
606 :returns: Parsed data as a dict and the status (PASS/FAIL).
607 :rtype: tuple(dict, str)
611 "NDR": {"LOWER": -1.0, "UPPER": -1.0},
612 "PDR": {"LOWER": -1.0, "UPPER": -1.0}
615 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
617 if groups is not None:
619 throughput["NDR"]["LOWER"] = float(groups.group(1))
620 throughput["NDR"]["UPPER"] = float(groups.group(2))
621 throughput["PDR"]["LOWER"] = float(groups.group(3))
622 throughput["PDR"]["UPPER"] = float(groups.group(4))
624 except (IndexError, ValueError):
627 return throughput, status
629 def _get_plr_throughput(self, msg):
630 """Get PLRsearch lower bound and PLRsearch upper bound from the test
633 :param msg: The test message to be parsed.
635 :returns: Parsed data as a dict and the status (PASS/FAIL).
636 :rtype: tuple(dict, str)
644 groups = re.search(self.REGEX_PLR_RATE, msg)
646 if groups is not None:
648 throughput["LOWER"] = float(groups.group(1))
649 throughput["UPPER"] = float(groups.group(2))
651 except (IndexError, ValueError):
654 return throughput, status
656 def _get_ndrpdr_latency(self, msg):
657 """Get LATENCY from the test message.
659 :param msg: The test message to be parsed.
661 :returns: Parsed data as a dict and the status (PASS/FAIL).
662 :rtype: tuple(dict, str)
667 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
668 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
671 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
672 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
676 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
678 if groups is not None:
679 keys = ("min", "avg", "max")
681 latency["NDR"]["direction1"] = dict(
682 zip(keys, [float(l) for l in groups.group(1).split('/')]))
683 latency["NDR"]["direction2"] = dict(
684 zip(keys, [float(l) for l in groups.group(2).split('/')]))
685 latency["PDR"]["direction1"] = dict(
686 zip(keys, [float(l) for l in groups.group(3).split('/')]))
687 latency["PDR"]["direction2"] = dict(
688 zip(keys, [float(l) for l in groups.group(4).split('/')]))
690 except (IndexError, ValueError):
693 return latency, status
695 def visit_suite(self, suite):
696 """Implements traversing through the suite and its direct children.
698 :param suite: Suite to process.
702 if self.start_suite(suite) is not False:
703 suite.suites.visit(self)
704 suite.tests.visit(self)
705 self.end_suite(suite)
707 def start_suite(self, suite):
708 """Called when suite starts.
710 :param suite: Suite to process.
716 parent_name = suite.parent.name
717 except AttributeError:
720 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
721 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
722 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
724 self._data["suites"][suite.longname.lower().replace('"', "'").
725 replace(" ", "_")] = {
726 "name": suite.name.lower(),
728 "parent": parent_name,
729 "level": len(suite.longname.split("."))
732 suite.keywords.visit(self)
734 def end_suite(self, suite):
735 """Called when suite ends.
737 :param suite: Suite to process.
743 def visit_test(self, test):
744 """Implements traversing through the test.
746 :param test: Test to process.
750 if self.start_test(test) is not False:
751 test.keywords.visit(self)
754 def start_test(self, test):
755 """Called when test starts.
757 :param test: Test to process.
762 longname_orig = test.longname.lower()
764 # Check the ignore list
765 if longname_orig in self._ignore:
768 tags = [str(tag) for tag in test.tags]
771 # Change the TC long name and name if defined in the mapping table
772 longname = self._mapping.get(longname_orig, None)
773 if longname is not None:
774 name = longname.split('.')[-1]
775 logging.debug("{0}\n{1}\n{2}\n{3}".format(
776 self._data["metadata"], longname_orig, longname, name))
778 longname = longname_orig
779 name = test.name.lower()
781 # Remove TC number from the TC long name (backward compatibility):
782 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
783 # Remove TC number from the TC name (not needed):
784 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
786 test_result["parent"] = test.parent.name.lower()
787 test_result["tags"] = tags
788 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
789 replace('\r', '').replace('[', ' |br| [')
790 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
791 test_result["msg"] = test.message.replace('\n', ' |br| '). \
792 replace('\r', '').replace('"', "'")
793 test_result["type"] = "FUNC"
794 test_result["status"] = test.status
796 if "PERFTEST" in tags:
797 # Replace info about cores (e.g. -1c-) with the info about threads
798 # and cores (e.g. -1t1c-) in the long test case names and in the
799 # test case names if necessary.
800 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
803 for tag in test_result["tags"]:
804 groups = re.search(self.REGEX_TC_TAG, tag)
810 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
811 "-{0}-".format(tag_tc.lower()),
814 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
815 "-{0}-".format(tag_tc.lower()),
819 test_result["status"] = "FAIL"
820 self._data["tests"][self._test_ID] = test_result
821 logging.debug("The test '{0}' has no or more than one "
822 "multi-threading tags.".format(self._test_ID))
823 logging.debug("Tags: {0}".format(test_result["tags"]))
826 if test.status == "PASS" and ("NDRPDRDISC" in tags or
832 # TODO: Remove when definitely no NDRPDRDISC tests are used:
833 if "NDRDISC" in tags:
834 test_result["type"] = "NDR"
835 # TODO: Remove when definitely no NDRPDRDISC tests are used:
836 elif "PDRDISC" in tags:
837 test_result["type"] = "PDR"
838 elif "NDRPDR" in tags:
839 test_result["type"] = "NDRPDR"
841 test_result["type"] = "SOAK"
843 test_result["type"] = "TCP"
845 test_result["type"] = "MRR"
846 elif "FRMOBL" in tags or "BMRR" in tags:
847 test_result["type"] = "BMRR"
849 test_result["status"] = "FAIL"
850 self._data["tests"][self._test_ID] = test_result
853 # TODO: Remove when definitely no NDRPDRDISC tests are used:
854 if test_result["type"] in ("NDR", "PDR"):
856 rate_value = str(re.search(
857 self.REGEX_RATE, test.message).group(1))
858 except AttributeError:
861 rate_unit = str(re.search(
862 self.REGEX_RATE, test.message).group(2))
863 except AttributeError:
866 test_result["throughput"] = dict()
867 test_result["throughput"]["value"] = \
868 int(rate_value.split('.')[0])
869 test_result["throughput"]["unit"] = rate_unit
870 test_result["latency"] = \
871 self._get_latency(test.message, test_result["type"])
872 if test_result["type"] == "PDR":
873 test_result["lossTolerance"] = str(re.search(
874 self.REGEX_TOLERANCE, test.message).group(1))
876 elif test_result["type"] in ("NDRPDR", ):
877 test_result["throughput"], test_result["status"] = \
878 self._get_ndrpdr_throughput(test.message)
879 test_result["latency"], test_result["status"] = \
880 self._get_ndrpdr_latency(test.message)
882 elif test_result["type"] in ("SOAK", ):
883 test_result["throughput"], test_result["status"] = \
884 self._get_plr_throughput(test.message)
886 elif test_result["type"] in ("TCP", ):
887 groups = re.search(self.REGEX_TCP, test.message)
888 test_result["result"] = int(groups.group(2))
890 elif test_result["type"] in ("MRR", "BMRR"):
891 test_result["result"] = dict()
892 groups = re.search(self.REGEX_BMRR, test.message)
893 if groups is not None:
894 items_str = groups.group(1)
895 items_float = [float(item.strip()) for item
896 in items_str.split(",")]
897 metadata = AvgStdevMetadataFactory.from_data(items_float)
898 # Next two lines have been introduced in CSIT-1179,
899 # to be removed in CSIT-1180.
902 test_result["result"]["receive-rate"] = metadata
904 groups = re.search(self.REGEX_MRR, test.message)
905 test_result["result"]["receive-rate"] = \
906 AvgStdevMetadataFactory.from_data([
907 float(groups.group(3)) / float(groups.group(1)), ])
909 self._data["tests"][self._test_ID] = test_result
911 def end_test(self, test):
912 """Called when test ends.
914 :param test: Test to process.
920 def visit_keyword(self, keyword):
921 """Implements traversing through the keyword and its child keywords.
923 :param keyword: Keyword to process.
924 :type keyword: Keyword
927 if self.start_keyword(keyword) is not False:
928 self.end_keyword(keyword)
930 def start_keyword(self, keyword):
931 """Called when keyword starts. Default implementation does nothing.
933 :param keyword: Keyword to process.
934 :type keyword: Keyword
938 if keyword.type == "setup":
939 self.visit_setup_kw(keyword)
940 elif keyword.type == "teardown":
941 self._lookup_kw_nr = 0
942 self.visit_teardown_kw(keyword)
944 self._lookup_kw_nr = 0
945 self.visit_test_kw(keyword)
946 except AttributeError:
949 def end_keyword(self, keyword):
950 """Called when keyword ends. Default implementation does nothing.
952 :param keyword: Keyword to process.
953 :type keyword: Keyword
958 def visit_test_kw(self, test_kw):
959 """Implements traversing through the test keyword and its child
962 :param test_kw: Keyword to process.
963 :type test_kw: Keyword
966 for keyword in test_kw.keywords:
967 if self.start_test_kw(keyword) is not False:
968 self.visit_test_kw(keyword)
969 self.end_test_kw(keyword)
971 def start_test_kw(self, test_kw):
972 """Called when test keyword starts. Default implementation does
975 :param test_kw: Keyword to process.
976 :type test_kw: Keyword
979 if test_kw.name.count("Show Runtime Counters On All Duts"):
980 self._lookup_kw_nr += 1
981 self._show_run_lookup_nr = 0
982 self._msg_type = "test-show-runtime"
983 elif test_kw.name.count("Install Dpdk Test") and not self._version:
984 self._msg_type = "dpdk-version"
987 test_kw.messages.visit(self)
989 def end_test_kw(self, test_kw):
990 """Called when keyword ends. Default implementation does nothing.
992 :param test_kw: Keyword to process.
993 :type test_kw: Keyword
998 def visit_setup_kw(self, setup_kw):
999 """Implements traversing through the teardown keyword and its child
1002 :param setup_kw: Keyword to process.
1003 :type setup_kw: Keyword
1006 for keyword in setup_kw.keywords:
1007 if self.start_setup_kw(keyword) is not False:
1008 self.visit_setup_kw(keyword)
1009 self.end_setup_kw(keyword)
1011 def start_setup_kw(self, setup_kw):
1012 """Called when teardown keyword starts. Default implementation does
1015 :param setup_kw: Keyword to process.
1016 :type setup_kw: Keyword
1019 if setup_kw.name.count("Show Vpp Version On All Duts") \
1020 and not self._version:
1021 self._msg_type = "vpp-version"
1022 elif setup_kw.name.count("Set Global Variable") \
1023 and not self._timestamp:
1024 self._msg_type = "timestamp"
1025 elif setup_kw.name.count("Setup Framework") and not self._testbed:
1026 self._msg_type = "testbed"
1029 setup_kw.messages.visit(self)
1031 def end_setup_kw(self, setup_kw):
1032 """Called when keyword ends. Default implementation does nothing.
1034 :param setup_kw: Keyword to process.
1035 :type setup_kw: Keyword
1040 def visit_teardown_kw(self, teardown_kw):
1041 """Implements traversing through the teardown keyword and its child
1044 :param teardown_kw: Keyword to process.
1045 :type teardown_kw: Keyword
1048 for keyword in teardown_kw.keywords:
1049 if self.start_teardown_kw(keyword) is not False:
1050 self.visit_teardown_kw(keyword)
1051 self.end_teardown_kw(keyword)
1053 def start_teardown_kw(self, teardown_kw):
1054 """Called when teardown keyword starts. Default implementation does
1057 :param teardown_kw: Keyword to process.
1058 :type teardown_kw: Keyword
1062 if teardown_kw.name.count("Show Vat History On All Duts"):
1063 self._conf_history_lookup_nr = 0
1064 self._msg_type = "teardown-vat-history"
1065 teardown_kw.messages.visit(self)
1066 elif teardown_kw.name.count("Show Papi History On All Duts"):
1067 self._conf_history_lookup_nr = 0
1068 self._msg_type = "teardown-papi-history"
1069 teardown_kw.messages.visit(self)
1071 def end_teardown_kw(self, teardown_kw):
1072 """Called when keyword ends. Default implementation does nothing.
1074 :param teardown_kw: Keyword to process.
1075 :type teardown_kw: Keyword
1080 def visit_message(self, msg):
1081 """Implements visiting the message.
1083 :param msg: Message to process.
1087 if self.start_message(msg) is not False:
1088 self.end_message(msg)
1090 def start_message(self, msg):
1091 """Called when message starts. Get required information from messages:
1094 :param msg: Message to process.
1100 self.parse_msg[self._msg_type](msg)
1102 def end_message(self, msg):
1103 """Called when message ends. Default implementation does nothing.
1105 :param msg: Message to process.
1112 class InputData(object):
1115 The data is extracted from output.xml files generated by Jenkins jobs and
1116 stored in pandas' DataFrames.
1122 (as described in ExecutionChecker documentation)
1124 (as described in ExecutionChecker documentation)
1126 (as described in ExecutionChecker documentation)
1129 def __init__(self, spec):
1132 :param spec: Specification.
1133 :type spec: Specification
1140 self._input_data = pd.Series()
1144 """Getter - Input data.
1146 :returns: Input data
1147 :rtype: pandas.Series
1149 return self._input_data
1151 def metadata(self, job, build):
1152 """Getter - metadata
1154 :param job: Job which metadata we want.
1155 :param build: Build which metadata we want.
1159 :rtype: pandas.Series
1162 return self.data[job][build]["metadata"]
1164 def suites(self, job, build):
1167 :param job: Job which suites we want.
1168 :param build: Build which suites we want.
1172 :rtype: pandas.Series
1175 return self.data[job][str(build)]["suites"]
1177 def tests(self, job, build):
1180 :param job: Job which tests we want.
1181 :param build: Build which tests we want.
1185 :rtype: pandas.Series
1188 return self.data[job][build]["tests"]
1190 def _parse_tests(self, job, build, log):
1191 """Process data from robot output.xml file and return JSON structured
1194 :param job: The name of job which build output data will be processed.
1195 :param build: The build which output data will be processed.
1196 :param log: List of log messages.
1199 :type log: list of tuples (severity, msg)
1200 :returns: JSON data structure.
1209 with open(build["file-name"], 'r') as data_file:
1211 result = ExecutionResult(data_file)
1212 except errors.DataError as err:
1213 log.append(("ERROR", "Error occurred while parsing output.xml: "
1216 checker = ExecutionChecker(metadata, self._cfg.mapping,
1218 result.visit(checker)
1222 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1223 """Download and parse the input data file.
1225 :param pid: PID of the process executing this method.
1226 :param job: Name of the Jenkins job which generated the processed input
1228 :param build: Information about the Jenkins build which generated the
1229 processed input file.
1230 :param repeat: Repeat the download specified number of times if not
1240 logs.append(("INFO", " Processing the job/build: {0}: {1}".
1241 format(job, build["build"])))
1248 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1254 logs.append(("ERROR", "It is not possible to download the input "
1255 "data file from the job '{job}', build "
1256 "'{build}', or it is damaged. Skipped.".
1257 format(job=job, build=build["build"])))
1259 logs.append(("INFO", " Processing data from the build '{0}' ...".
1260 format(build["build"])))
1261 data = self._parse_tests(job, build, logs)
1263 logs.append(("ERROR", "Input data file from the job '{job}', "
1264 "build '{build}' is damaged. Skipped.".
1265 format(job=job, build=build["build"])))
1270 remove(build["file-name"])
1271 except OSError as err:
1272 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1273 format(build["file-name"], repr(err))))
1275 # If the time-period is defined in the specification file, remove all
1276 # files which are outside the time period.
1277 timeperiod = self._cfg.input.get("time-period", None)
1278 if timeperiod and data:
1280 timeperiod = timedelta(int(timeperiod))
1281 metadata = data.get("metadata", None)
1283 generated = metadata.get("generated", None)
1285 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1286 if (now - generated) > timeperiod:
1287 # Remove the data and the file:
1292 " The build {job}/{build} is outdated, will be "
1293 "removed".format(job=job, build=build["build"])))
1294 file_name = self._cfg.input["file-name"]
1296 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
1297 "{job}{sep}{build}{sep}{name}".format(
1300 build=build["build"],
1304 logs.append(("INFO",
1305 " The file {name} has been removed".
1306 format(name=full_name)))
1307 except OSError as err:
1308 logs.append(("ERROR",
1309 "Cannot remove the file '{0}': {1}".
1310 format(full_name, repr(err))))
1311 logs.append(("INFO", " Done."))
1313 for level, line in logs:
1316 elif level == "ERROR":
1318 elif level == "DEBUG":
1320 elif level == "CRITICAL":
1321 logging.critical(line)
1322 elif level == "WARNING":
1323 logging.warning(line)
1325 return {"data": data, "state": state, "job": job, "build": build}
1327 def download_and_parse_data(self, repeat=1):
1328 """Download the input data files, parse input data from input files and
1329 store in pandas' Series.
1331 :param repeat: Repeat the download specified number of times if not
1336 logging.info("Downloading and parsing input files ...")
1338 for job, builds in self._cfg.builds.items():
1339 for build in builds:
1341 result = self._download_and_parse_build(job, build, repeat)
1342 build_nr = result["build"]["build"]
1345 data = result["data"]
1346 build_data = pd.Series({
1347 "metadata": pd.Series(
1348 data["metadata"].values(),
1349 index=data["metadata"].keys()),
1350 "suites": pd.Series(data["suites"].values(),
1351 index=data["suites"].keys()),
1352 "tests": pd.Series(data["tests"].values(),
1353 index=data["tests"].keys())})
1355 if self._input_data.get(job, None) is None:
1356 self._input_data[job] = pd.Series()
1357 self._input_data[job][str(build_nr)] = build_data
1359 self._cfg.set_input_file_name(
1360 job, build_nr, result["build"]["file-name"])
1362 self._cfg.set_input_state(job, build_nr, result["state"])
1364 logging.info("Memory allocation: {0:,d}MB".format(
1365 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
1367 logging.info("Done.")
1370 def _end_of_tag(tag_filter, start=0, closer="'"):
1371 """Return the index of character in the string which is the end of tag.
1373 :param tag_filter: The string where the end of tag is being searched.
1374 :param start: The index where the searching is stated.
1375 :param closer: The character which is the tag closer.
1376 :type tag_filter: str
1379 :returns: The index of the tag closer.
1384 idx_opener = tag_filter.index(closer, start)
1385 return tag_filter.index(closer, idx_opener + 1)
1390 def _condition(tag_filter):
1391 """Create a conditional statement from the given tag filter.
1393 :param tag_filter: Filter based on tags from the element specification.
1394 :type tag_filter: str
1395 :returns: Conditional statement which can be evaluated.
1401 index = InputData._end_of_tag(tag_filter, index)
1405 tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
1407 def filter_data(self, element, params=None, data_set="tests",
1408 continue_on_error=False):
1409 """Filter required data from the given jobs and builds.
1411 The output data structure is:
1415 - test (or suite) 1 ID:
1421 - test (or suite) n ID:
1428 :param element: Element which will use the filtered data.
1429 :param params: Parameters which will be included in the output. If None,
1430 all parameters are included.
1431 :param data_set: The set of data to be filtered: tests, suites,
1433 :param continue_on_error: Continue if there is error while reading the
1434 data. The Item will be empty then
1435 :type element: pandas.Series
1438 :type continue_on_error: bool
1439 :returns: Filtered data.
1440 :rtype pandas.Series
1444 if element["filter"] in ("all", "template"):
1447 cond = InputData._condition(element["filter"])
1448 logging.debug(" Filter: {0}".format(cond))
1450 logging.error(" No filter defined.")
1454 params = element.get("parameters", None)
1456 params.append("type")
1460 for job, builds in element["data"].items():
1461 data[job] = pd.Series()
1462 for build in builds:
1463 data[job][str(build)] = pd.Series()
1465 data_iter = self.data[job][str(build)][data_set].\
1468 if continue_on_error:
1472 for test_ID, test_data in data_iter:
1473 if eval(cond, {"tags": test_data.get("tags", "")}):
1474 data[job][str(build)][test_ID] = pd.Series()
1476 for param, val in test_data.items():
1477 data[job][str(build)][test_ID][param] = val
1479 for param in params:
1481 data[job][str(build)][test_ID][param] =\
1484 data[job][str(build)][test_ID][param] =\
1488 except (KeyError, IndexError, ValueError) as err:
1489 logging.error(" Missing mandatory parameter in the element "
1490 "specification: {0}".format(err))
1492 except AttributeError:
1495 logging.error(" The filter '{0}' is not correct. Check if all "
1496 "tags are enclosed by apostrophes.".format(cond))
1500 def merge_data(data):
1501 """Merge data from more jobs and builds to a simple data structure.
1503 The output data structure is:
1505 - test (suite) 1 ID:
1511 - test (suite) n ID:
1514 :param data: Data to merge.
1515 :type data: pandas.Series
1516 :returns: Merged data.
1517 :rtype: pandas.Series
1520 logging.info(" Merging data ...")
1522 merged_data = pd.Series()
1523 for _, builds in data.iteritems():
1524 for _, item in builds.iteritems():
1525 for ID, item_data in item.iteritems():
1526 merged_data[ID] = item_data