1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
- extract data from output.xml files generated by Jenkins jobs and store it
  in pandas' Series,
- provide access to the data,
- filter the data using tags.
22 import multiprocessing
28 from robot.api import ExecutionResult, ResultVisitor
29 from robot import errors
30 from collections import OrderedDict
31 from string import replace
33 from os.path import join
34 from datetime import datetime as dt
35 from datetime import timedelta
36 from json import loads
37 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
39 from input_data_files import download_and_unzip_data_file
40 from utils import Worker
43 # Separator used in file names
47 class ExecutionChecker(ResultVisitor):
48 """Class to traverse through the test suite structure.
50 The functionality implemented in this class generates a json structure:
56 "generated": "Timestamp",
57 "version": "SUT version",
58 "job": "Jenkins job name",
59 "build": "Information about the build"
62 "Suite long name 1": {
64 "doc": "Suite 1 documentation",
65 "parent": "Suite 1 parent",
66 "level": "Level of the suite in the suite hierarchy"
68 "Suite long name N": {
70 "doc": "Suite N documentation",
71 "parent": "Suite 2 parent",
72 "level": "Level of the suite in the suite hierarchy"
79 "parent": "Name of the parent of the test",
80 "doc": "Test documentation",
81 "msg": "Test message",
82 "vat-history": "DUT1 and DUT2 VAT History",
83 "show-run": "Show Run",
84 "tags": ["tag 1", "tag 2", "tag n"],
86 "status": "PASS" | "FAIL",
128 "parent": "Name of the parent of the test",
129 "doc": "Test documentation",
130 "msg": "Test message",
131 "tags": ["tag 1", "tag 2", "tag n"],
133 "status": "PASS" | "FAIL",
140 "parent": "Name of the parent of the test",
141 "doc": "Test documentation",
142 "msg": "Test message",
143 "tags": ["tag 1", "tag 2", "tag n"],
144 "type": "MRR" | "BMRR",
145 "status": "PASS" | "FAIL",
147 "receive-rate": AvgStdevMetadata,
151 # TODO: Remove when definitely no NDRPDRDISC tests are used:
155 "parent": "Name of the parent of the test",
156 "doc": "Test documentation",
157 "msg": "Test message",
158 "tags": ["tag 1", "tag 2", "tag n"],
159 "type": "PDR" | "NDR",
160 "status": "PASS" | "FAIL",
161 "throughput": { # Only type: "PDR" | "NDR"
163 "unit": "pps" | "bps" | "percentage"
165 "latency": { # Only type: "PDR" | "NDR"
172 "50": { # Only for NDR
177 "10": { # Only for NDR
189 "50": { # Only for NDR
194 "10": { # Only for NDR
201 "lossTolerance": "lossTolerance", # Only type: "PDR"
202 "vat-history": "DUT1 and DUT2 VAT History"
203 "show-run": "Show Run"
215 "metadata": { # Optional
216 "version": "VPP version",
217 "job": "Jenkins job name",
218 "build": "Information about the build"
222 "doc": "Suite 1 documentation",
223 "parent": "Suite 1 parent",
224 "level": "Level of the suite in the suite hierarchy"
227 "doc": "Suite N documentation",
228 "parent": "Suite 2 parent",
229 "level": "Level of the suite in the suite hierarchy"
235 "parent": "Name of the parent of the test",
236 "doc": "Test documentation"
237 "msg": "Test message"
238 "tags": ["tag 1", "tag 2", "tag n"],
239 "vat-history": "DUT1 and DUT2 VAT History"
240 "show-run": "Show Run"
241 "status": "PASS" | "FAIL"
249 .. note:: ID is the lowercase full path to the test.
252 # TODO: Remove when definitely no NDRPDRDISC tests are used:
253 REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')
255 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
256 r'NDR_UPPER:\s(\d+.\d+).*\n'
257 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
258 r'PDR_UPPER:\s(\d+.\d+)')
260 # TODO: Remove when definitely no NDRPDRDISC tests are used:
261 REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
262 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
263 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
264 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
265 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
266 r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
267 r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')
269 REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
270 r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
271 r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')
273 REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
274 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
276 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
279 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
280 r"VPP Version:\s*)(.*)")
282 REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
283 r"(RTE Version: 'DPDK )(.*)(')")
285 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
287 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
288 r'tx\s(\d*),\srx\s(\d*)')
290 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
291 r' in packets per second: \[(.*)\]')
293 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
295 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
297 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
299 REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
301 def __init__(self, metadata, mapping, ignore):
304 :param metadata: Key-value pairs to be included in "metadata" part of
306 :param mapping: Mapping of the old names of test cases to the new
308 :param ignore: List of TCs to be ignored.
314 # Type of message to parse out from the test messages
315 self._msg_type = None
321 self._timestamp = None
323 # Testbed. The testbed is identified by TG node IP address.
326 # Mapping of TCs long names
327 self._mapping = mapping
330 self._ignore = ignore
332 # Number of VAT History messages found:
334 # 1 - VAT History of DUT1
335 # 2 - VAT History of DUT2
336 self._lookup_kw_nr = 0
337 self._vat_history_lookup_nr = 0
339 # Number of Show Running messages found
341 # 1 - Show run message found
342 self._show_run_lookup_nr = 0
344 # Test ID of currently processed test- the lowercase full path to the
348 # The main data structure
350 "metadata": OrderedDict(),
351 "suites": OrderedDict(),
352 "tests": OrderedDict()
355 # Save the provided metadata
356 for key, val in metadata.items():
357 self._data["metadata"][key] = val
359 # Dictionary defining the methods used to parse different types of
362 "timestamp": self._get_timestamp,
363 "vpp-version": self._get_vpp_version,
364 "dpdk-version": self._get_dpdk_version,
365 "teardown-vat-history": self._get_vat_history,
366 "test-show-runtime": self._get_show_run,
367 "testbed": self._get_testbed
372 """Getter - Data parsed from the XML file.
374 :returns: Data parsed from the XML file.
379 def _get_testbed(self, msg):
380 """Called when extraction of testbed IP is required.
381 The testbed is identified by TG node IP address.
383 :param msg: Message to process.
388 if msg.message.count("Arguments:"):
389 message = str(msg.message).replace(' ', '').replace('\n', '').\
390 replace("'", '"').replace('b"', '"').\
391 replace("honeycom", "honeycomb")
392 message = loads(message[11:-1])
394 self._testbed = message["TG"]["host"]
395 except (KeyError, ValueError):
398 self._data["metadata"]["testbed"] = self._testbed
399 self._msg_type = None
401 def _get_vpp_version(self, msg):
402 """Called when extraction of VPP version is required.
404 :param msg: Message to process.
409 if msg.message.count("return STDOUT Version:") or \
410 msg.message.count("VPP Version:"):
411 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
413 self._data["metadata"]["version"] = self._version
414 self._msg_type = None
416 def _get_dpdk_version(self, msg):
417 """Called when extraction of DPDK version is required.
419 :param msg: Message to process.
424 if msg.message.count("return STDOUT testpmd"):
426 self._version = str(re.search(
427 self.REGEX_VERSION_DPDK, msg.message). group(4))
428 self._data["metadata"]["version"] = self._version
432 self._msg_type = None
434 def _get_timestamp(self, msg):
435 """Called when extraction of timestamp is required.
437 :param msg: Message to process.
442 self._timestamp = msg.timestamp[:14]
443 self._data["metadata"]["generated"] = self._timestamp
444 self._msg_type = None
446 def _get_vat_history(self, msg):
447 """Called when extraction of VAT command history is required.
449 :param msg: Message to process.
453 if msg.message.count("VAT command history:"):
454 self._vat_history_lookup_nr += 1
455 if self._vat_history_lookup_nr == 1:
456 self._data["tests"][self._test_ID]["vat-history"] = str()
458 self._msg_type = None
459 text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
460 "VAT command history:", "", msg.message, count=1). \
461 replace("\n\n", "\n").replace('\n', ' |br| ').\
462 replace('\r', '').replace('"', "'")
464 self._data["tests"][self._test_ID]["vat-history"] += " |br| "
465 self._data["tests"][self._test_ID]["vat-history"] += \
466 "**DUT" + str(self._vat_history_lookup_nr) + ":** " + text
468 def _get_show_run(self, msg):
469 """Called when extraction of VPP operational data (output of CLI command
470 Show Runtime) is required.
472 :param msg: Message to process.
476 if msg.message.count("return STDOUT Thread "):
477 self._show_run_lookup_nr += 1
478 if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
479 self._data["tests"][self._test_ID]["show-run"] = str()
480 if self._lookup_kw_nr > 1:
481 self._msg_type = None
482 if self._show_run_lookup_nr == 1:
483 text = msg.message.replace("vat# ", "").\
484 replace("return STDOUT ", "").replace("\n\n", "\n").\
485 replace('\n', ' |br| ').\
486 replace('\r', '').replace('"', "'")
488 self._data["tests"][self._test_ID]["show-run"] += " |br| "
489 self._data["tests"][self._test_ID]["show-run"] += \
490 "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
494 # TODO: Remove when definitely no NDRPDRDISC tests are used:
495 def _get_latency(self, msg, test_type):
496 """Get the latency data from the test message.
498 :param msg: Message to be parsed.
499 :param test_type: Type of the test - NDR or PDR.
502 :returns: Latencies parsed from the message.
506 if test_type == "NDR":
507 groups = re.search(self.REGEX_LAT_NDR, msg)
508 groups_range = range(1, 7)
509 elif test_type == "PDR":
510 groups = re.search(self.REGEX_LAT_PDR, msg)
511 groups_range = range(1, 3)
516 for idx in groups_range:
518 lat = [int(item) for item in str(groups.group(idx)).split('/')]
519 except (AttributeError, ValueError):
521 latencies.append(lat)
523 keys = ("min", "avg", "max")
531 latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
532 latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
533 if test_type == "NDR":
534 latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
535 latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
536 latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
537 latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
541 def _get_ndrpdr_throughput(self, msg):
542 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
545 :param msg: The test message to be parsed.
547 :returns: Parsed data as a dict and the status (PASS/FAIL).
548 :rtype: tuple(dict, str)
552 "NDR": {"LOWER": -1.0, "UPPER": -1.0},
553 "PDR": {"LOWER": -1.0, "UPPER": -1.0}
556 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
558 if groups is not None:
560 throughput["NDR"]["LOWER"] = float(groups.group(1))
561 throughput["NDR"]["UPPER"] = float(groups.group(2))
562 throughput["PDR"]["LOWER"] = float(groups.group(3))
563 throughput["PDR"]["UPPER"] = float(groups.group(4))
565 except (IndexError, ValueError):
568 return throughput, status
570 def _get_ndrpdr_latency(self, msg):
571 """Get LATENCY from the test message.
573 :param msg: The test message to be parsed.
575 :returns: Parsed data as a dict and the status (PASS/FAIL).
576 :rtype: tuple(dict, str)
581 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
582 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
585 "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
586 "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
590 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
592 if groups is not None:
593 keys = ("min", "avg", "max")
595 latency["NDR"]["direction1"] = dict(
596 zip(keys, [float(l) for l in groups.group(1).split('/')]))
597 latency["NDR"]["direction2"] = dict(
598 zip(keys, [float(l) for l in groups.group(2).split('/')]))
599 latency["PDR"]["direction1"] = dict(
600 zip(keys, [float(l) for l in groups.group(3).split('/')]))
601 latency["PDR"]["direction2"] = dict(
602 zip(keys, [float(l) for l in groups.group(4).split('/')]))
604 except (IndexError, ValueError):
607 return latency, status
def visit_suite(self, suite):
    """Implements traversing through the suite and its direct children.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
    # A False return from start_suite() vetoes the whole traversal,
    # including the end_suite() hook.
    if self.start_suite(suite) is False:
        return
    suite.suites.visit(self)
    suite.tests.visit(self)
    self.end_suite(suite)
621 def start_suite(self, suite):
622 """Called when suite starts.
624 :param suite: Suite to process.
630 parent_name = suite.parent.name
631 except AttributeError:
634 doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
635 replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
636 doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)
638 self._data["suites"][suite.longname.lower().replace('"', "'").
639 replace(" ", "_")] = {
640 "name": suite.name.lower(),
642 "parent": parent_name,
643 "level": len(suite.longname.split("."))
646 suite.keywords.visit(self)
648 def end_suite(self, suite):
649 """Called when suite ends.
651 :param suite: Suite to process.
657 def visit_test(self, test):
658 """Implements traversing through the test.
660 :param test: Test to process.
664 if self.start_test(test) is not False:
665 test.keywords.visit(self)
668 def start_test(self, test):
669 """Called when test starts.
671 :param test: Test to process.
676 longname_orig = test.longname.lower()
678 # Check the ignore list
679 if longname_orig in self._ignore:
682 tags = [str(tag) for tag in test.tags]
685 # Change the TC long name and name if defined in the mapping table
686 longname = self._mapping.get(longname_orig, None)
687 if longname is not None:
688 name = longname.split('.')[-1]
689 logging.debug("{0}\n{1}\n{2}\n{3}".format(
690 self._data["metadata"], longname_orig, longname, name))
692 longname = longname_orig
693 name = test.name.lower()
695 # Remove TC number from the TC long name (backward compatibility):
696 self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
697 # Remove TC number from the TC name (not needed):
698 test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
700 test_result["parent"] = test.parent.name.lower()
701 test_result["tags"] = tags
702 doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
703 replace('\r', '').replace('[', ' |br| [')
704 test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
705 test_result["msg"] = test.message.replace('\n', ' |br| '). \
706 replace('\r', '').replace('"', "'")
707 test_result["type"] = "FUNC"
708 test_result["status"] = test.status
710 if "PERFTEST" in tags:
711 # Replace info about cores (e.g. -1c-) with the info about threads
712 # and cores (e.g. -1t1c-) in the long test case names and in the
713 # test case names if necessary.
714 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
717 for tag in test_result["tags"]:
718 groups = re.search(self.REGEX_TC_TAG, tag)
724 self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
725 "-{0}-".format(tag_tc.lower()),
728 test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
729 "-{0}-".format(tag_tc.lower()),
733 test_result["status"] = "FAIL"
734 self._data["tests"][self._test_ID] = test_result
735 logging.debug("The test '{0}' has no or more than one "
736 "multi-threading tags.".format(self._test_ID))
737 logging.debug("Tags: {0}".format(test_result["tags"]))
740 if test.status == "PASS" and ("NDRPDRDISC" in tags or
745 # TODO: Remove when definitely no NDRPDRDISC tests are used:
746 if "NDRDISC" in tags:
747 test_result["type"] = "NDR"
748 # TODO: Remove when definitely no NDRPDRDISC tests are used:
749 elif "PDRDISC" in tags:
750 test_result["type"] = "PDR"
751 elif "NDRPDR" in tags:
752 test_result["type"] = "NDRPDR"
754 test_result["type"] = "TCP"
756 test_result["type"] = "MRR"
757 elif "FRMOBL" in tags or "BMRR" in tags:
758 test_result["type"] = "BMRR"
760 test_result["status"] = "FAIL"
761 self._data["tests"][self._test_ID] = test_result
764 # TODO: Remove when definitely no NDRPDRDISC tests are used:
765 if test_result["type"] in ("NDR", "PDR"):
767 rate_value = str(re.search(
768 self.REGEX_RATE, test.message).group(1))
769 except AttributeError:
772 rate_unit = str(re.search(
773 self.REGEX_RATE, test.message).group(2))
774 except AttributeError:
777 test_result["throughput"] = dict()
778 test_result["throughput"]["value"] = \
779 int(rate_value.split('.')[0])
780 test_result["throughput"]["unit"] = rate_unit
781 test_result["latency"] = \
782 self._get_latency(test.message, test_result["type"])
783 if test_result["type"] == "PDR":
784 test_result["lossTolerance"] = str(re.search(
785 self.REGEX_TOLERANCE, test.message).group(1))
787 elif test_result["type"] in ("NDRPDR", ):
788 test_result["throughput"], test_result["status"] = \
789 self._get_ndrpdr_throughput(test.message)
790 test_result["latency"], test_result["status"] = \
791 self._get_ndrpdr_latency(test.message)
793 elif test_result["type"] in ("TCP", ):
794 groups = re.search(self.REGEX_TCP, test.message)
795 test_result["result"] = int(groups.group(2))
797 elif test_result["type"] in ("MRR", "BMRR"):
798 test_result["result"] = dict()
799 groups = re.search(self.REGEX_BMRR, test.message)
800 if groups is not None:
801 items_str = groups.group(1)
802 items_float = [float(item.strip()) for item
803 in items_str.split(",")]
804 metadata = AvgStdevMetadataFactory.from_data(items_float)
805 test_result["result"]["receive-rate"] = metadata
807 groups = re.search(self.REGEX_MRR, test.message)
808 test_result["result"]["receive-rate"] = \
809 AvgStdevMetadataFactory.from_data([
810 float(groups.group(3)) / float(groups.group(1)), ])
812 self._data["tests"][self._test_ID] = test_result
814 def end_test(self, test):
815 """Called when test ends.
817 :param test: Test to process.
def visit_keyword(self, keyword):
    """Implements traversing through the keyword and its child keywords.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    # end_keyword() runs only when start_keyword() did not return False.
    if self.start_keyword(keyword) is False:
        return
    self.end_keyword(keyword)
833 def start_keyword(self, keyword):
834 """Called when keyword starts. Default implementation does nothing.
836 :param keyword: Keyword to process.
837 :type keyword: Keyword
841 if keyword.type == "setup":
842 self.visit_setup_kw(keyword)
843 elif keyword.type == "teardown":
844 self._lookup_kw_nr = 0
845 self.visit_teardown_kw(keyword)
847 self._lookup_kw_nr = 0
848 self.visit_test_kw(keyword)
849 except AttributeError:
852 def end_keyword(self, keyword):
853 """Called when keyword ends. Default implementation does nothing.
855 :param keyword: Keyword to process.
856 :type keyword: Keyword
def visit_test_kw(self, test_kw):
    """Implements traversing through the test keyword and its child
    keywords.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
    for child in test_kw.keywords:
        # Depth-first recursion; a False return from start_test_kw()
        # skips the child subtree and its end hook.
        if self.start_test_kw(child) is False:
            continue
        self.visit_test_kw(child)
        self.end_test_kw(child)
874 def start_test_kw(self, test_kw):
875 """Called when test keyword starts. Default implementation does
878 :param test_kw: Keyword to process.
879 :type test_kw: Keyword
882 if test_kw.name.count("Show Runtime Counters On All Duts"):
883 self._lookup_kw_nr += 1
884 self._show_run_lookup_nr = 0
885 self._msg_type = "test-show-runtime"
886 elif test_kw.name.count("Start The L2fwd Test") and not self._version:
887 self._msg_type = "dpdk-version"
890 test_kw.messages.visit(self)
892 def end_test_kw(self, test_kw):
893 """Called when keyword ends. Default implementation does nothing.
895 :param test_kw: Keyword to process.
896 :type test_kw: Keyword
def visit_setup_kw(self, setup_kw):
    """Implements traversing through the setup keyword and its child
    keywords.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
    for child in setup_kw.keywords:
        # Depth-first recursion; a False return from start_setup_kw()
        # skips the child subtree and its end hook.
        if self.start_setup_kw(child) is False:
            continue
        self.visit_setup_kw(child)
        self.end_setup_kw(child)
914 def start_setup_kw(self, setup_kw):
915 """Called when teardown keyword starts. Default implementation does
918 :param setup_kw: Keyword to process.
919 :type setup_kw: Keyword
922 if setup_kw.name.count("Show Vpp Version On All Duts") \
923 and not self._version:
924 self._msg_type = "vpp-version"
926 elif setup_kw.name.count("Setup performance global Variables") \
927 and not self._timestamp:
928 self._msg_type = "timestamp"
929 elif setup_kw.name.count("Setup Framework") and not self._testbed:
930 self._msg_type = "testbed"
933 setup_kw.messages.visit(self)
935 def end_setup_kw(self, setup_kw):
936 """Called when keyword ends. Default implementation does nothing.
938 :param setup_kw: Keyword to process.
939 :type setup_kw: Keyword
def visit_teardown_kw(self, teardown_kw):
    """Implements traversing through the teardown keyword and its child
    keywords.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    for child in teardown_kw.keywords:
        # Depth-first recursion; a False return from start_teardown_kw()
        # skips the child subtree and its end hook.
        if self.start_teardown_kw(child) is False:
            continue
        self.visit_teardown_kw(child)
        self.end_teardown_kw(child)
def start_teardown_kw(self, teardown_kw):
    """Called when teardown keyword starts. Default implementation does
    nothing.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    # VAT history is only collected from the dedicated teardown keyword.
    if "Show Vat History On All Duts" in teardown_kw.name:
        # Reset the per-DUT counter before parsing the history messages.
        self._vat_history_lookup_nr = 0
        self._msg_type = "teardown-vat-history"
        teardown_kw.messages.visit(self)
971 def end_teardown_kw(self, teardown_kw):
972 """Called when keyword ends. Default implementation does nothing.
974 :param teardown_kw: Keyword to process.
975 :type teardown_kw: Keyword
def visit_message(self, msg):
    """Implements visiting the message.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    # end_message() runs only when start_message() did not return False.
    if self.start_message(msg) is False:
        return
    self.end_message(msg)
990 def start_message(self, msg):
991 """Called when message starts. Get required information from messages:
994 :param msg: Message to process.
1000 self.parse_msg[self._msg_type](msg)
1002 def end_message(self, msg):
1003 """Called when message ends. Default implementation does nothing.
1005 :param msg: Message to process.
1012 class InputData(object):
1015 The data is extracted from output.xml files generated by Jenkins jobs and
1016 stored in pandas' DataFrames.
1022 (as described in ExecutionChecker documentation)
1024 (as described in ExecutionChecker documentation)
1026 (as described in ExecutionChecker documentation)
1029 def __init__(self, spec):
1032 :param spec: Specification.
1033 :type spec: Specification
1040 self._input_data = pd.Series()
1044 """Getter - Input data.
1046 :returns: Input data
1047 :rtype: pandas.Series
1049 return self._input_data
def metadata(self, job, build):
    """Getter - metadata

    :param job: Job which metadata we want.
    :param build: Build which metadata we want.
    :type job: str
    :type build: str
    :returns: Metadata
    :rtype: pandas.Series
    """
    # Builds are stored under string keys (download_and_parse_data uses
    # str(build_nr)), so normalize the key the same way suites() does;
    # otherwise an integer build number raises KeyError.
    return self.data[job][str(build)]["metadata"]
def suites(self, job, build):
    """Getter - suites

    :param job: Job which suites we want.
    :param build: Build which suites we want.
    :type job: str
    :type build: str
    :returns: Suites.
    :rtype: pandas.Series
    """
    # Builds are keyed by the build number as a string.
    build_data = self.data[job][str(build)]
    return build_data["suites"]
1077 def tests(self, job, build):
1080 :param job: Job which tests we want.
1081 :param build: Build which tests we want.
1085 :rtype: pandas.Series
1088 return self.data[job][build]["tests"]
1090 def _parse_tests(self, job, build, log):
1091 """Process data from robot output.xml file and return JSON structured
1094 :param job: The name of job which build output data will be processed.
1095 :param build: The build which output data will be processed.
1096 :param log: List of log messages.
1099 :type log: list of tuples (severity, msg)
1100 :returns: JSON data structure.
1109 with open(build["file-name"], 'r') as data_file:
1111 result = ExecutionResult(data_file)
1112 except errors.DataError as err:
1113 log.append(("ERROR", "Error occurred while parsing output.xml: "
1116 checker = ExecutionChecker(metadata, self._cfg.mapping,
1118 result.visit(checker)
1122 def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
1123 """Download and parse the input data file.
1125 :param pid: PID of the process executing this method.
1126 :param data_queue: Shared memory between processes. Queue which keeps
1127 the result data. This data is then read by the main process and used
1128 in further processing.
1129 :param job: Name of the Jenkins job which generated the processed input
1131 :param build: Information about the Jenkins build which generated the
1132 processed input file.
1133 :param repeat: Repeat the download specified number of times if not
1136 :type data_queue: multiprocessing.Manager().Queue()
1144 logging.info(" Processing the job/build: {0}: {1}".
1145 format(job, build["build"]))
1147 logs.append(("INFO", " Processing the job/build: {0}: {1}".
1148 format(job, build["build"])))
1155 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1161 logs.append(("ERROR", "It is not possible to download the input "
1162 "data file from the job '{job}', build "
1163 "'{build}', or it is damaged. Skipped.".
1164 format(job=job, build=build["build"])))
1166 logs.append(("INFO", " Processing data from the build '{0}' ...".
1167 format(build["build"])))
1168 data = self._parse_tests(job, build, logs)
1170 logs.append(("ERROR", "Input data file from the job '{job}', "
1171 "build '{build}' is damaged. Skipped.".
1172 format(job=job, build=build["build"])))
1177 remove(build["file-name"])
1178 except OSError as err:
1179 logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
1180 format(build["file-name"], repr(err))))
1182 # If the time-period is defined in the specification file, remove all
1183 # files which are outside the time period.
1184 timeperiod = self._cfg.input.get("time-period", None)
1185 if timeperiod and data:
1187 timeperiod = timedelta(int(timeperiod))
1188 metadata = data.get("metadata", None)
1190 generated = metadata.get("generated", None)
1192 generated = dt.strptime(generated, "%Y%m%d %H:%M")
1193 if (now - generated) > timeperiod:
1194 # Remove the data and the file:
1199 " The build {job}/{build} is outdated, will be "
1200 "removed".format(job=job, build=build["build"])))
1201 file_name = self._cfg.input["file-name"]
1203 self._cfg.environment["paths"]["DIR[WORKING,DATA]"],
1204 "{job}{sep}{build}{sep}{name}".
1207 build=build["build"],
1211 logs.append(("INFO",
1212 " The file {name} has been removed".
1213 format(name=full_name)))
1214 except OSError as err:
1215 logs.append(("ERROR",
1216 "Cannot remove the file '{0}': {1}".
1217 format(full_name, repr(err))))
1219 logs.append(("INFO", " Done."))
1228 data_queue.put(result)
1230 def download_and_parse_data(self, repeat=1):
1231 """Download the input data files, parse input data from input files and
1232 store in pandas' Series.
1234 :param repeat: Repeat the download specified number of times if not
1239 logging.info("Downloading and parsing input files ...")
1241 work_queue = multiprocessing.JoinableQueue()
1242 manager = multiprocessing.Manager()
1243 data_queue = manager.Queue()
1244 cpus = multiprocessing.cpu_count()
1247 for cpu in range(cpus):
1248 worker = Worker(work_queue,
1250 self._download_and_parse_build)
1251 worker.daemon = True
1253 workers.append(worker)
1254 os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
1255 format(cpu, worker.pid))
1257 for job, builds in self._cfg.builds.items():
1258 for build in builds:
1259 work_queue.put((job, build, repeat))
1263 logging.info("Done.")
1265 while not data_queue.empty():
1266 result = data_queue.get()
1269 build_nr = result["build"]["build"]
1272 data = result["data"]
1273 build_data = pd.Series({
1274 "metadata": pd.Series(data["metadata"].values(),
1275 index=data["metadata"].keys()),
1276 "suites": pd.Series(data["suites"].values(),
1277 index=data["suites"].keys()),
1278 "tests": pd.Series(data["tests"].values(),
1279 index=data["tests"].keys())})
1281 if self._input_data.get(job, None) is None:
1282 self._input_data[job] = pd.Series()
1283 self._input_data[job][str(build_nr)] = build_data
1285 self._cfg.set_input_file_name(job, build_nr,
1286 result["build"]["file-name"])
1288 self._cfg.set_input_state(job, build_nr, result["state"])
1290 for item in result["logs"]:
1291 if item[0] == "INFO":
1292 logging.info(item[1])
1293 elif item[0] == "ERROR":
1294 logging.error(item[1])
1295 elif item[0] == "DEBUG":
1296 logging.debug(item[1])
1297 elif item[0] == "CRITICAL":
1298 logging.critical(item[1])
1299 elif item[0] == "WARNING":
1300 logging.warning(item[1])
1304 # Terminate all workers
1305 for worker in workers:
1309 logging.info("Done.")
1312 def _end_of_tag(tag_filter, start=0, closer="'"):
1313 """Return the index of character in the string which is the end of tag.
1315 :param tag_filter: The string where the end of tag is being searched.
1316 :param start: The index where the searching is stated.
1317 :param closer: The character which is the tag closer.
1318 :type tag_filter: str
1321 :returns: The index of the tag closer.
1326 idx_opener = tag_filter.index(closer, start)
1327 return tag_filter.index(closer, idx_opener + 1)
1332 def _condition(tag_filter):
1333 """Create a conditional statement from the given tag filter.
1335 :param tag_filter: Filter based on tags from the element specification.
1336 :type tag_filter: str
1337 :returns: Conditional statement which can be evaluated.
1343 index = InputData._end_of_tag(tag_filter, index)
1347 tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
1349 def filter_data(self, element, params=None, data_set="tests",
1350 continue_on_error=False):
1351 """Filter required data from the given jobs and builds.
1353 The output data structure is:
1357 - test (or suite) 1 ID:
1363 - test (or suite) n ID:
1370 :param element: Element which will use the filtered data.
1371 :param params: Parameters which will be included in the output. If None,
1372 all parameters are included.
1373 :param data_set: The set of data to be filtered: tests, suites,
1375 :param continue_on_error: Continue if there is error while reading the
1376 data. The Item will be empty then
1377 :type element: pandas.Series
1380 :type continue_on_error: bool
1381 :returns: Filtered data.
1382 :rtype pandas.Series
1386 if element["filter"] in ("all", "template"):
1389 cond = InputData._condition(element["filter"])
1390 logging.debug(" Filter: {0}".format(cond))
1392 logging.error(" No filter defined.")
1396 params = element.get("parameters", None)
1398 params.append("type")
1402 for job, builds in element["data"].items():
1403 data[job] = pd.Series()
1404 for build in builds:
1405 data[job][str(build)] = pd.Series()
1407 data_iter = self.data[job][str(build)][data_set].\
1410 if continue_on_error:
1414 for test_ID, test_data in data_iter:
1415 if eval(cond, {"tags": test_data.get("tags", "")}):
1416 data[job][str(build)][test_ID] = pd.Series()
1418 for param, val in test_data.items():
1419 data[job][str(build)][test_ID][param] = val
1421 for param in params:
1423 data[job][str(build)][test_ID][param] =\
1426 data[job][str(build)][test_ID][param] =\
1430 except (KeyError, IndexError, ValueError) as err:
1431 logging.error(" Missing mandatory parameter in the element "
1432 "specification: {0}".format(err))
1434 except AttributeError:
1437 logging.error(" The filter '{0}' is not correct. Check if all "
1438 "tags are enclosed by apostrophes.".format(cond))
1442 def merge_data(data):
1443 """Merge data from more jobs and builds to a simple data structure.
1445 The output data structure is:
1447 - test (suite) 1 ID:
1453 - test (suite) n ID:
1456 :param data: Data to merge.
1457 :type data: pandas.Series
1458 :returns: Merged data.
1459 :rtype: pandas.Series
1462 logging.info(" Merging data ...")
1464 merged_data = pd.Series()
1465 for _, builds in data.iteritems():
1466 for _, item in builds.iteritems():
1467 for ID, item_data in item.iteritems():
1468 merged_data[ID] = item_data