1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
- extract data from output.xml files generated by Jenkins jobs and store it in
  pandas' Series,
18 - provide access to the data.
19 - filter the data using tags,
22 import multiprocessing
28 from robot.api import ExecutionResult, ResultVisitor
29 from robot import errors
30 from collections import OrderedDict
31 from string import replace
33 from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
35 from input_data_files import download_and_unzip_data_file
36 from utils import Worker
39 class ExecutionChecker(ResultVisitor):
40 """Class to traverse through the test suite structure.
42 The functionality implemented in this class generates a json structure:
48 "generated": "Timestamp",
49 "version": "SUT version",
50 "job": "Jenkins job name",
51 "build": "Information about the build"
54 "Suite long name 1": {
56 "doc": "Suite 1 documentation",
57 "parent": "Suite 1 parent",
58 "level": "Level of the suite in the suite hierarchy"
60 "Suite long name N": {
62 "doc": "Suite N documentation",
63 "parent": "Suite 2 parent",
64 "level": "Level of the suite in the suite hierarchy"
71 "parent": "Name of the parent of the test",
72 "doc": "Test documentation",
73 "msg": "Test message",
74 "vat-history": "DUT1 and DUT2 VAT History",
75 "show-run": "Show Run",
76 "tags": ["tag 1", "tag 2", "tag n"],
78 "status": "PASS" | "FAIL",
120 "parent": "Name of the parent of the test",
121 "doc": "Test documentation",
122 "msg": "Test message",
123 "tags": ["tag 1", "tag 2", "tag n"],
125 "status": "PASS" | "FAIL",
132 "parent": "Name of the parent of the test",
133 "doc": "Test documentation",
134 "msg": "Test message",
135 "tags": ["tag 1", "tag 2", "tag n"],
136 "type": "MRR" | "BMRR",
137 "status": "PASS" | "FAIL",
139 "receive-rate": AvgStdevMetadata,
143 # TODO: Remove when definitely no NDRPDRDISC tests are used:
147 "parent": "Name of the parent of the test",
148 "doc": "Test documentation",
149 "msg": "Test message",
150 "tags": ["tag 1", "tag 2", "tag n"],
151 "type": "PDR" | "NDR",
152 "status": "PASS" | "FAIL",
153 "throughput": { # Only type: "PDR" | "NDR"
155 "unit": "pps" | "bps" | "percentage"
157 "latency": { # Only type: "PDR" | "NDR"
164 "50": { # Only for NDR
169 "10": { # Only for NDR
181 "50": { # Only for NDR
186 "10": { # Only for NDR
193 "lossTolerance": "lossTolerance", # Only type: "PDR"
194 "vat-history": "DUT1 and DUT2 VAT History"
195 "show-run": "Show Run"
207 "metadata": { # Optional
208 "version": "VPP version",
209 "job": "Jenkins job name",
210 "build": "Information about the build"
214 "doc": "Suite 1 documentation",
215 "parent": "Suite 1 parent",
216 "level": "Level of the suite in the suite hierarchy"
219 "doc": "Suite N documentation",
220 "parent": "Suite 2 parent",
221 "level": "Level of the suite in the suite hierarchy"
227 "parent": "Name of the parent of the test",
228 "doc": "Test documentation"
229 "msg": "Test message"
230 "tags": ["tag 1", "tag 2", "tag n"],
231 "vat-history": "DUT1 and DUT2 VAT History"
232 "show-run": "Show Run"
233 "status": "PASS" | "FAIL"
241 .. note:: ID is the lowercase full path to the test.
    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    # "FINAL_RATE: <float> <unit>" in NDR/PDR test messages.
    REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)')

    # NDR/PDR lower/upper bounds from NDRPDR test messages (4 float groups).
    # NOTE(review): the '.' in '\d+.\d+' is an unescaped wildcard; most likely
    # '\d+\.\d+' was intended -- verify against real messages before changing.
    REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'NDR_UPPER:\s(\d+.\d+).*\n'
                                   r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
                                   r'PDR_UPPER:\s(\d+.\d+)')

    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    # Three "LAT_<p>%NDR: ['min/avg/max', 'min/avg/max']" lines (6 groups).
    REGEX_LAT_NDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n'
                               r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\]')

    # One "LAT_<p>%PDR: ['min/avg/max', 'min/avg/max']" line (2 groups).
    REGEX_LAT_PDR = re.compile(r'^[\D\d]*'
                               r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\','
                               r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*')

    # Two "LATENCY ... ['dir1', 'dir2']" lines from NDRPDR tests (4 groups).
    REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
                                  r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')

    # Loss tolerance used by PDR tests.
    REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'

    # VPP version reported by the DUTs; group(2) is the version string.
    REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*)(.*)")

    # DPDK version parsed from testpmd output; group(4) is the version string.
    REGEX_VERSION_DPDK = re.compile(r"(return STDOUT testpmd)([\d\D\n]*)"
                                    r"(RTE Version: 'DPDK )(.*)(')")

    # TCP test result line: "Total rps|cps|throughput: <int>".
    REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')

    # Legacy MRR result: "MaxReceivedRate_Results [pkts/<N>sec]: tx <t>, rx <r>".
    REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
                           r'tx\s(\d*),\srx\s(\d*)')

    # BMRR trial results: comma-separated floats inside square brackets.
    REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
                            r' in packets per second: \[(.*)\]')

    # Multi-threading tag, e.g. "2T2C".
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    # Old-style threads/cores fragment in TC names, e.g. "-1t1c-".
    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    # New-style cores-only fragment in TC names, e.g. "-1c-".
    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    # Leading test case number, e.g. "tc01-".
    REGEX_TC_NUMBER = re.compile(r'tc[0-9]{2}-')
    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            the generated JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # Timestamp of the processed build
        self._timestamp = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Long names of TCs to be skipped entirely
        self._ignore = ignore

        # Number of VAT History messages found:
        # 1 - VAT History of DUT1
        # 2 - VAT History of DUT2
        self._lookup_kw_nr = 0
        self._vat_history_lookup_nr = 0

        # Number of Show Running messages found
        # 1 - Show run message found
        self._show_run_lookup_nr = 0

        # Test ID of currently processed test- the lowercase full path to the

        # The main data structure
            "metadata": OrderedDict(),
            "suites": OrderedDict(),
            "tests": OrderedDict()

        # Save the provided metadata
        for key, val in metadata.items():
            self._data["metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages; keys match the values assigned to self._msg_type.
            "timestamp": self._get_timestamp,
            "vpp-version": self._get_vpp_version,
            "dpdk-version": self._get_dpdk_version,
            "teardown-vat-history": self._get_vat_history,
            "test-show-runtime": self._get_show_run
359 """Getter - Data parsed from the XML file.
361 :returns: Data parsed from the XML file.
    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        Stores the version on the instance and under "metadata"/"version",
        then clears the message type.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT Version:"):
            # group(2) of REGEX_VERSION_VPP is the version string.
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
            self._data["metadata"]["version"] = self._version
            self._msg_type = None
    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

        if msg.message.count("return STDOUT testpmd"):
            # group(4) of REGEX_VERSION_DPDK is the DPDK version string.
            self._version = str(re.search(
                self.REGEX_VERSION_DPDK, msg.message). group(4))
            self._data["metadata"]["version"] = self._version
            self._msg_type = None
398 def _get_timestamp(self, msg):
399 """Called when extraction of timestamp is required.
401 :param msg: Message to process.
406 self._timestamp = msg.timestamp[:14]
407 self._data["metadata"]["generated"] = self._timestamp
408 self._msg_type = None
    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        Accumulates per-DUT VAT history under the current test's
        "vat-history" key, formatted for reST (' |br| ' line breaks).

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("VAT command history:"):
            self._vat_history_lookup_nr += 1
            if self._vat_history_lookup_nr == 1:
                self._data["tests"][self._test_ID]["vat-history"] = str()
                self._msg_type = None
            # Strip the "a.b.c.d VAT command history:" header, then normalise
            # the text for reST. NOTE(review): the dots in the IP pattern are
            # unescaped wildcards -- presumably harmless here, but confirm.
            text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} "
                          "VAT command history:", "", msg.message, count=1). \
                replace("\n\n", "\n").replace('\n', ' |br| ').\
                replace('\r', '').replace('"', "'")
            self._data["tests"][self._test_ID]["vat-history"] += " |br| "
            self._data["tests"][self._test_ID]["vat-history"] += \
                "**DUT" + str(self._vat_history_lookup_nr) + ":** " + text
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count("return STDOUT Thread "):
            self._show_run_lookup_nr += 1
            if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1:
                self._data["tests"][self._test_ID]["show-run"] = str()
            if self._lookup_kw_nr > 1:
                self._msg_type = None
            if self._show_run_lookup_nr == 1:
                # Normalise the CLI output for reST: strip prompts/prefixes,
                # collapse blank lines, encode line breaks as ' |br| '.
                text = msg.message.replace("vat# ", "").\
                    replace("return STDOUT ", "").replace("\n\n", "\n").\
                    replace('\n', ' |br| ').\
                    replace('\r', '').replace('"', "'")
                self._data["tests"][self._test_ID]["show-run"] += " |br| "
                self._data["tests"][self._test_ID]["show-run"] += \
                    "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text
    # TODO: Remove when definitely no NDRPDRDISC tests are used:
    def _get_latency(self, msg, test_type):
        """Get the latency data from the test message.

        :param msg: Message to be parsed.
        :param test_type: Type of the test - NDR or PDR.
        :type msg: str
        :type test_type: str
        :returns: Latencies parsed from the message.
        :rtype: dict
        """

        if test_type == "NDR":
            groups = re.search(self.REGEX_LAT_NDR, msg)
            groups_range = range(1, 7)
        elif test_type == "PDR":
            groups = re.search(self.REGEX_LAT_PDR, msg)
            groups_range = range(1, 3)

        for idx in groups_range:
                # Each captured group is a 'min/avg/max' triple of integers.
                lat = [int(item) for item in str(groups.group(idx)).split('/')]
            except (AttributeError, ValueError):
            latencies.append(lat)

        keys = ("min", "avg", "max")
        latency["direction1"]["100"] = dict(zip(keys, latencies[0]))
        latency["direction2"]["100"] = dict(zip(keys, latencies[1]))
        if test_type == "NDR":
            # NDR reports additional 50% and 10% rate latencies.
            latency["direction1"]["50"] = dict(zip(keys, latencies[2]))
            latency["direction2"]["50"] = dict(zip(keys, latencies[3]))
            latency["direction1"]["10"] = dict(zip(keys, latencies[4]))
            latency["direction2"]["10"] = dict(zip(keys, latencies[5]))
    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

            # -1.0 marks a value which could not be parsed.
            "NDR": {"LOWER": -1.0, "UPPER": -1.0},
            "PDR": {"LOWER": -1.0, "UPPER": -1.0}

        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
                throughput["NDR"]["LOWER"] = float(groups.group(1))
                throughput["NDR"]["UPPER"] = float(groups.group(2))
                throughput["PDR"]["LOWER"] = float(groups.group(3))
                throughput["PDR"]["UPPER"] = float(groups.group(4))
            except (IndexError, ValueError):

        return throughput, status
    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """

            # -1.0 marks a value which could not be parsed.
            "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
            "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}
            "direction1": {"min": -1.0, "avg": -1.0, "max": -1.0},
            "direction2": {"min": -1.0, "avg": -1.0, "max": -1.0}

        groups = re.search(self.REGEX_NDRPDR_LAT, msg)

        if groups is not None:
            keys = ("min", "avg", "max")
                # Each captured group is a 'min/avg/max' triple of floats.
                latency["NDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(1).split('/')]))
                latency["NDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(2).split('/')]))
                latency["PDR"]["direction1"] = dict(
                    zip(keys, [float(l) for l in groups.group(3).split('/')]))
                latency["PDR"]["direction2"] = dict(
                    zip(keys, [float(l) for l in groups.group(4).split('/')]))
            except (IndexError, ValueError):

        return latency, status
573 def visit_suite(self, suite):
574 """Implements traversing through the suite and its direct children.
576 :param suite: Suite to process.
580 if self.start_suite(suite) is not False:
581 suite.suites.visit(self)
582 suite.tests.visit(self)
583 self.end_suite(suite)
    def start_suite(self, suite):
        """Called when suite starts.

        Records the suite (name, doc, parent, level) under "suites" keyed by
        its normalised long name, then visits the suite's keywords.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

            parent_name = suite.parent.name
        except AttributeError:

        doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\
            replace('\r', '').replace('*[', ' |br| *[').replace("*", "**")
        # NOTE(review): string.replace() (from `from string import replace`)
        # exists only in Python 2; doc_str.replace(' |br| *[', '*[', 1) is the
        # portable equivalent -- consider migrating.
        doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1)

        self._data["suites"][suite.longname.lower().replace('"', "'").
                             replace(" ", "_")] = {
                                 "name": suite.name.lower(),
                                 "parent": parent_name,
                                 "level": len(suite.longname.split("."))

        suite.keywords.visit(self)
    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
    def start_test(self, test):
        """Called when test starts.

        Builds the per-test result record (name, parent, doc, msg, tags,
        type, status and type-specific results) and stores it under "tests"
        keyed by self._test_ID.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:

        tags = [str(tag) for tag in test.tags]

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split('.')[-1]
            logging.debug("{0}\n{1}\n{2}\n{3}".format(
                self._data["metadata"], longname_orig, longname, name))
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_ID = re.sub(self.REGEX_TC_NUMBER, "", longname)
        # Remove TC number from the TC name (not needed):
        test_result["name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result["parent"] = test.parent.name.lower()
        test_result["tags"] = tags
        doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
            replace('\r', '').replace('[', ' |br| [')
        # NOTE(review): string.replace() is Python-2 only; the method form
        # doc_str.replace(' |br| [', '[', 1) is the portable equivalent.
        test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
        test_result["msg"] = test.message.replace('\n', ' |br| '). \
            replace('\r', '').replace('"', "'")
        test_result["type"] = "FUNC"
        test_result["status"] = test.status

        if "PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_ID)
                for tag in test_result["tags"]:
                    groups = re.search(self.REGEX_TC_TAG, tag)
                        self._test_ID = re.sub(self.REGEX_TC_NAME_NEW,
                                               "-{0}-".format(tag_tc.lower()),
                        test_result["name"] = re.sub(self.REGEX_TC_NAME_NEW,
                                                     "-{0}-".format(tag_tc.lower()),
                    # No usable multi-threading tag found for the rename.
                    test_result["status"] = "FAIL"
                    self._data["tests"][self._test_ID] = test_result
                    logging.error("The test '{0}' has no or more than one "
                                  "multi-threading tags.".format(self._test_ID))
                    logging.debug("Tags: {0}".format(test_result["tags"]))

        if test.status == "PASS" and ("NDRPDRDISC" in tags or
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if "NDRDISC" in tags:
                test_result["type"] = "NDR"
            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            elif "PDRDISC" in tags:
                test_result["type"] = "PDR"
            elif "NDRPDR" in tags:
                test_result["type"] = "NDRPDR"
                test_result["type"] = "TCP"
                test_result["type"] = "MRR"
            elif "FRMOBL" in tags or "BMRR" in tags:
                test_result["type"] = "BMRR"
                test_result["status"] = "FAIL"
                self._data["tests"][self._test_ID] = test_result

            # TODO: Remove when definitely no NDRPDRDISC tests are used:
            if test_result["type"] in ("NDR", "PDR"):
                    rate_value = str(re.search(
                        self.REGEX_RATE, test.message).group(1))
                except AttributeError:
                    rate_unit = str(re.search(
                        self.REGEX_RATE, test.message).group(2))
                except AttributeError:

                test_result["throughput"] = dict()
                test_result["throughput"]["value"] = \
                    int(rate_value.split('.')[0])
                test_result["throughput"]["unit"] = rate_unit
                test_result["latency"] = \
                    self._get_latency(test.message, test_result["type"])
                if test_result["type"] == "PDR":
                    test_result["lossTolerance"] = str(re.search(
                        self.REGEX_TOLERANCE, test.message).group(1))

            elif test_result["type"] in ("NDRPDR", ):
                test_result["throughput"], test_result["status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result["latency"], test_result["status"] = \
                    self._get_ndrpdr_latency(test.message)

            elif test_result["type"] in ("TCP", ):
                groups = re.search(self.REGEX_TCP, test.message)
                test_result["result"] = int(groups.group(2))

            elif test_result["type"] in ("MRR", "BMRR"):
                test_result["result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [float(item.strip()) for item
                                   in items_str.split(",")]
                    metadata = AvgStdevMetadataFactory.from_data(items_float)
                    # Next two lines have been introduced in CSIT-1179,
                    # to be removed in CSIT-1180.
                    test_result["result"]["receive-rate"] = metadata
                    # Fall back to the legacy MRR message format.
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result["result"]["receive-rate"] = \
                        AvgStdevMetadataFactory.from_data([
                            float(groups.group(3)) / float(groups.group(1)), ])

        self._data["tests"][self._test_ID] = test_result
    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
791 def visit_keyword(self, keyword):
792 """Implements traversing through the keyword and its child keywords.
794 :param keyword: Keyword to process.
795 :type keyword: Keyword
798 if self.start_keyword(keyword) is not False:
799 self.end_keyword(keyword)
    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        Dispatches to the setup/teardown/test keyword visitors according to
        keyword.type; the lookup-keyword counter is reset on each dispatch.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
            if keyword.type == "setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == "teardown":
                self._lookup_kw_nr = 0
                self.visit_teardown_kw(keyword)
                self._lookup_kw_nr = 0
                self.visit_test_kw(keyword)
        except AttributeError:
    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
829 def visit_test_kw(self, test_kw):
830 """Implements traversing through the test keyword and its child
833 :param test_kw: Keyword to process.
834 :type test_kw: Keyword
837 for keyword in test_kw.keywords:
838 if self.start_test_kw(keyword) is not False:
839 self.visit_test_kw(keyword)
840 self.end_test_kw(keyword)
    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        Sets self._msg_type for keywords whose messages carry show-runtime
        output or the DPDK version, then visits the keyword's messages.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count("Show Runtime Counters On All Duts"):
            self._lookup_kw_nr += 1
            self._show_run_lookup_nr = 0
            self._msg_type = "test-show-runtime"
        elif test_kw.name.count("Start The L2fwd Test") and not self._version:
            self._msg_type = "dpdk-version"
            test_kw.messages.visit(self)
    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)
    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        Sets self._msg_type for keywords whose messages carry the VPP version
        or the build timestamp, then visits the keyword's messages.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count("Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = "vpp-version"

        elif setup_kw.name.count("Setup performance global Variables") \
                and not self._timestamp:
            self._msg_type = "timestamp"
            setup_kw.messages.visit(self)
    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)
    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts. Default implementation does
        nothing.

        Sets self._msg_type for keywords whose messages carry the VAT command
        history, then visits the keyword's messages.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

        if teardown_kw.name.count("Show Vat History On All Duts"):
            self._vat_history_lookup_nr = 0
            self._msg_type = "teardown-vat-history"
            teardown_kw.messages.visit(self)
    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
946 def visit_message(self, msg):
947 """Implements visiting the message.
949 :param msg: Message to process.
953 if self.start_message(msg) is not False:
954 self.end_message(msg)
    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        dispatches the message to the parser registered for the current
        self._msg_type in self.parse_msg.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
            self.parse_msg[self._msg_type](msg)
    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
978 class InputData(object):
981 The data is extracted from output.xml files generated by Jenkins jobs and
982 stored in pandas' DataFrames.
988 (as described in ExecutionChecker documentation)
990 (as described in ExecutionChecker documentation)
992 (as described in ExecutionChecker documentation)
    def __init__(self, spec):
        """Initialisation.

        :param spec: Specification.
        :type spec: Specification
        """

        # Data store keyed by job name, then by stringified build number.
        self._input_data = pd.Series()
1010 """Getter - Input data.
1012 :returns: Input data
1013 :rtype: pandas.Series
1015 return self._input_data
1017 def metadata(self, job, build):
1018 """Getter - metadata
1020 :param job: Job which metadata we want.
1021 :param build: Build which metadata we want.
1025 :rtype: pandas.Series
1028 return self.data[job][build]["metadata"]
    def suites(self, job, build):
        """Getter - suites.

        :param job: Job which suites we want.
        :param build: Build which suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """
        # Builds are stored under stringified build numbers (see
        # download_and_parse_data), hence str(build).
        return self.data[job][str(build)]["suites"]
1043 def tests(self, job, build):
1046 :param job: Job which tests we want.
1047 :param build: Build which tests we want.
1051 :rtype: pandas.Series
1054 return self.data[job][build]["tests"]
    def _parse_tests(self, job, build, log):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of job which build output data will be processed.
        :param build: The build which output data will be processed.
        :param log: List of log messages.
        :type job: str
        :type build: dict
        :type log: list of tuples (severity, msg)
        :returns: JSON data structure.
        :rtype: dict
        """

        with open(build["file-name"], 'r') as data_file:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                log.append(("ERROR", "Error occurred while parsing output.xml: "
        # Walk the parsed robot result with the visitor that builds the JSON.
        checker = ExecutionChecker(metadata, self._cfg.mapping,
        result.visit(checker)
    def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
        """Download and parse the input data file.

        :param pid: PID of the process executing this method.
        :param data_queue: Shared memory between processes. Queue which keeps
            the result data. This data is then read by the main process and used
            in further processing.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
        :type data_queue: multiprocessing.Manager().Queue()
        :type job: str
        :type build: dict
        :type repeat: int
        """

        logging.info(" Processing the job/build: {0}: {1}".
                     format(job, build["build"]))

        logs.append(("INFO", " Processing the job/build: {0}: {1}".
                     format(job, build["build"])))

        success = download_and_unzip_data_file(self._cfg, job, build, pid,
            logs.append(("ERROR", "It is not possible to download the input "
                                  "data file from the job '{job}', build "
                                  "'{build}', or it is damaged. Skipped.".
                         format(job=job, build=build["build"])))
            logs.append(("INFO", " Processing data from the build '{0}' ...".
                         format(build["build"])))
            data = self._parse_tests(job, build, logs)
                logs.append(("ERROR", "Input data file from the job '{job}', "
                                      "build '{build}' is damaged. Skipped.".
                             format(job=job, build=build["build"])))
            # Best effort clean-up of the downloaded file.
                remove(build["file-name"])
            except OSError as err:
                logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
                             format(build["file-name"], err)))
        logs.append(("INFO", " Done."))

        # Hand the result (build info, state, data, logs) back to the parent.
        data_queue.put(result)
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """

        logging.info("Downloading and parsing input files ...")

        work_queue = multiprocessing.JoinableQueue()
        manager = multiprocessing.Manager()
        data_queue = manager.Queue()
        cpus = multiprocessing.cpu_count()

        for cpu in range(cpus):
            worker = Worker(work_queue,
                            self._download_and_parse_build)
            worker.daemon = True
            workers.append(worker)
            # Pin each worker process to one CPU (best effort; output
            # discarded).
            os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
                      format(cpu, worker.pid))

        for job, builds in self._cfg.builds.items():
            for build in builds:
                work_queue.put((job, build, repeat))

        logging.info("Done.")

        # Collect per-build results produced by the worker processes.
        while not data_queue.empty():
            result = data_queue.get()

            build_nr = result["build"]["build"]

            data = result["data"]
            build_data = pd.Series({
                "metadata": pd.Series(data["metadata"].values(),
                                      index=data["metadata"].keys()),
                "suites": pd.Series(data["suites"].values(),
                                    index=data["suites"].keys()),
                "tests": pd.Series(data["tests"].values(),
                                   index=data["tests"].keys())})

            if self._input_data.get(job, None) is None:
                self._input_data[job] = pd.Series()
            # Build numbers are stored as strings - keep lookups consistent.
            self._input_data[job][str(build_nr)] = build_data

            self._cfg.set_input_file_name(job, build_nr,
                                          result["build"]["file-name"])

            self._cfg.set_input_state(job, build_nr, result["state"])

            # Re-emit the worker's buffered log messages in this process.
            for item in result["logs"]:
                if item[0] == "INFO":
                    logging.info(item[1])
                elif item[0] == "ERROR":
                    logging.error(item[1])
                elif item[0] == "DEBUG":
                    logging.debug(item[1])
                elif item[0] == "CRITICAL":
                    logging.critical(item[1])
                elif item[0] == "WARNING":
                    logging.warning(item[1])

        # Terminate all workers
        for worker in workers:

        logging.info("Done.")
    def _end_of_tag(tag_filter, start=0, closer="'"):
        """Return the index of character in the string which is the end of tag.

        :param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the searching is stated.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
        """
            # Find the opening closer first, then the matching closing one.
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        Rewrites each quoted tag in the filter into a "'tag' in tags"
        membership test so the result can be passed to eval().

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
        """
            index = InputData._end_of_tag(tag_filter, index)
            tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
    def filter_data(self, element, params=None, data_set="tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:

        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              ...
            - test (or suite) n ID:
          ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """

            # "all"/"template" filters keep everything; anything else is
            # compiled into an eval()-able condition over the test's tags.
            if element["filter"] in ("all", "template"):
                cond = InputData._condition(element["filter"])
                logging.debug("   Filter: {0}".format(cond))
            logging.error("  No filter defined.")

            params = element.get("parameters", None)
                params.append("type")

            for job, builds in element["data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                        data_iter = self.data[job][str(build)][data_set].\
                        if continue_on_error:
                    for test_ID, test_data in data_iter:
                        # NOTE(review): eval() of the filter string -- the
                        # filter comes from the element specification, not
                        # untrusted input; do not feed it external data.
                        if eval(cond, {"tags": test_data.get("tags", "")}):
                            data[job][str(build)][test_ID] = pd.Series()
                                for param, val in test_data.items():
                                    data[job][str(build)][test_ID][param] = val
                                for param in params:
                                        data[job][str(build)][test_ID][param] =\
                                        data[job][str(build)][test_ID][param] =\

        except (KeyError, IndexError, ValueError) as err:
            logging.error("   Missing mandatory parameter in the element "
                          "specification: {0}".format(err))
        except AttributeError:
            logging.error("   The filter '{0}' is not correct. Check if all "
                          "tags are enclosed by apostrophes.".format(cond))
1370 def merge_data(data):
1371 """Merge data from more jobs and builds to a simple data structure.
1373 The output data structure is:
1375 - test (suite) 1 ID:
1381 - test (suite) n ID:
1384 :param data: Data to merge.
1385 :type data: pandas.Series
1386 :returns: Merged data.
1387 :rtype: pandas.Series
1390 logging.info(" Merging data ...")
1392 merged_data = pd.Series()
1393 for _, builds in data.iteritems():
1394 for _, item in builds.iteritems():
1395 for ID, item_data in item.iteritems():
1396 merged_data[ID] = item_data