1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
36 from robot.api import ExecutionResult, ResultVisitor
37 from robot import errors
39 from resources.libraries.python import jumpavg
40 from input_data_files import download_and_unzip_data_file
43 # Separator used in file names
# NOTE(review): Robot Framework ResultVisitor subclass; walks output.xml and
# builds a nested dict (metadata/suites/tests).  Several docstring lines are
# elided in this view; structure below is the documented JSON layout.
47 class ExecutionChecker(ResultVisitor):
48     """Class to traverse through the test suite structure.
50     The functionality implemented in this class generates a json structure:
56             "generated": "Timestamp",
57             "version": "SUT version",
58             "job": "Jenkins job name",
59             "build": "Information about the build"
62             "Suite long name 1": {
64                 "doc": "Suite 1 documentation",
65                 "parent": "Suite 1 parent",
66                 "level": "Level of the suite in the suite hierarchy"
68             "Suite long name N": {
70                 "doc": "Suite N documentation",
71                 "parent": "Suite 2 parent",
72                 "level": "Level of the suite in the suite hierarchy"
79                 "parent": "Name of the parent of the test",
80                 "doc": "Test documentation",
81                 "msg": "Test message",
82                 "conf-history": "DUT1 and DUT2 VAT History",
83                 "show-run": "Show Run",
84                 "tags": ["tag 1", "tag 2", "tag n"],
86                 "status": "PASS" | "FAIL",
132                 "parent": "Name of the parent of the test",
133                 "doc": "Test documentation",
134                 "msg": "Test message",
135                 "tags": ["tag 1", "tag 2", "tag n"],
137                 "status": "PASS" | "FAIL",
144                 "parent": "Name of the parent of the test",
145                 "doc": "Test documentation",
146                 "msg": "Test message",
147                 "tags": ["tag 1", "tag 2", "tag n"],
148                 "type": "MRR" | "BMRR",
149                 "status": "PASS" | "FAIL",
151                     "receive-rate": float,
152                     # Average of a list, computed using AvgStdevStats.
153                     # In CSIT-1180, replace with List[float].
167         "metadata": {  # Optional
168             "version": "VPP version",
169             "job": "Jenkins job name",
170             "build": "Information about the build"
174             "doc": "Suite 1 documentation",
175             "parent": "Suite 1 parent",
176             "level": "Level of the suite in the suite hierarchy"
179             "doc": "Suite N documentation",
180             "parent": "Suite 2 parent",
181             "level": "Level of the suite in the suite hierarchy"
187             "parent": "Name of the parent of the test",
188             "doc": "Test documentation"
189             "msg": "Test message"
190             "tags": ["tag 1", "tag 2", "tag n"],
191             "conf-history": "DUT1 and DUT2 VAT History"
192             "show-run": "Show Run"
193             "status": "PASS" | "FAIL"
201     .. note:: ID is the lowercase full path to the test.
# NOTE(review): the regexes below parse free-form result messages.  In numeric
# captures like (\d+.\d+) the '.' is an unescaped dot, so it matches any
# character, not only a decimal point — presumably tolerated; confirm before
# tightening.
204     REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
205                                 r'PLRsearch upper bound::?\s(\d+.\d+)')
# Captures NDR/PDR lower+upper bounds, in this group order: NDR_LOWER,
# NDR_UPPER, PDR_LOWER, PDR_UPPER.
207     REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
208                                    r'NDR_UPPER:\s(\d+.\d+).*\n'
209                                    r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
210                                    r'PDR_UPPER:\s(\d+.\d+)')
# Two LATENCY lines, each with a two-element list: four groups total
# (direction1/direction2 for NDR then PDR).
212     REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
213                                   r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
215     REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
# Version strings: group(2) carries the actual version text.
218     REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
219                                    r"VPP Version:\s*|VPP version:\s*)(.*)")
221     REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
223     REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')
# MRR: group(1)=trial duration [s], group(2)=tx, group(3)=rx packet counts.
225     REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
226                            r'tx\s(\d*),\srx\s(\d*)')
228     REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
229                             r' in packets per second: \[(.*)\]')
231     REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
232     REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')
# Test-case naming helpers: thread/core tag (e.g. 2t1c), old/new TC name
# fragments, and the leading 'tcNN-' number.
234     REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
236     REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
238     REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
240     REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
# Parses "(host - socket)" out of PAPI CLI messages (see _get_show_run).
242     REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
# NOTE(review): initializer — stores parse state, seeds self._data["metadata"]
# from the provided metadata, and builds the message-type -> parser dispatch
# table used by start_message().  Some lines are elided in this view.
244     def __init__(self, metadata, mapping, ignore):
247         :param metadata: Key-value pairs to be included in "metadata" part of
249         :param mapping: Mapping of the old names of test cases to the new
251         :param ignore: List of TCs to be ignored.
257         # Type of message to parse out from the test messages
258         self._msg_type = None
264         self._timestamp = None
266         # Testbed. The testbed is identified by TG node IP address.
269         # Mapping of TCs long names
270         self._mapping = mapping
273         self._ignore = ignore
275         # Number of PAPI History messages found:
277         # 1 - PAPI History of DUT1
278         # 2 - PAPI History of DUT2
279         self._lookup_kw_nr = 0
280         self._conf_history_lookup_nr = 0
282         # Number of Show Running messages found
284         # 1 - Show run message found
285         self._show_run_lookup_nr = 0
287         # Test ID of currently processed test- the lowercase full path to the
291         # The main data structure
293             u"metadata": OrderedDict(),
294             u"suites": OrderedDict(),
295             u"tests": OrderedDict()
298         # Save the provided metadata
299         for key, val in metadata.items():
300             self._data[u"metadata"][key] = val
302         # Dictionary defining the methods used to parse different types of
# Keys are the values assigned to self._msg_type by the keyword visitors;
# values are the bound parser methods invoked per message.
305             u"timestamp": self._get_timestamp,
306             u"vpp-version": self._get_vpp_version,
307             u"dpdk-version": self._get_dpdk_version,
308             # TODO: Remove when not needed:
309             u"teardown-vat-history": self._get_vat_history,
310             u"teardown-papi-history": self._get_papi_history,
311             u"test-show-runtime": self._get_show_run,
312             u"testbed": self._get_testbed
317         """Getter - Data parsed from the XML file.
319         :returns: Data parsed from the XML file.
# NOTE(review): message parsers for one-shot metadata.  Each resets
# self._msg_type to None once its value is extracted so later messages of the
# same keyword are ignored.
324     def _get_testbed(self, msg):
325         """Called when extraction of testbed IP is required.
326         The testbed is identified by TG node IP address.
328         :param msg: Message to process.
333         if msg.message.count(u"Setup of TG node") or \
334                 msg.message.count(u"Setup of node TG host"):
335             reg_tg_ip = re.compile(
336                 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
338                 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
339             except (KeyError, ValueError, IndexError, AttributeError):
342             self._data[u"metadata"][u"testbed"] = self._testbed
343             self._msg_type = None
# Extracts the VPP version via REGEX_VERSION_VPP (group 2) into metadata.
345     def _get_vpp_version(self, msg):
346         """Called when extraction of VPP version is required.
348         :param msg: Message to process.
353         if msg.message.count(u"return STDOUT Version:") or \
354             msg.message.count(u"VPP Version:") or \
355             msg.message.count(u"VPP version:"):
356             self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
358             self._data[u"metadata"][u"version"] = self._version
359             self._msg_type = None
# Same as above for DPDK builds, via REGEX_VERSION_DPDK.
361     def _get_dpdk_version(self, msg):
362         """Called when extraction of DPDK version is required.
364         :param msg: Message to process.
369         if msg.message.count(u"DPDK Version:"):
371                 self._version = str(re.search(
372                     self.REGEX_VERSION_DPDK, msg.message).group(2))
373                 self._data[u"metadata"][u"version"] = self._version
377                 self._msg_type = None
# Records the first 14 chars of the message timestamp — presumably
# "YYYYMMDD HH:MM" (cf. the strptime format in _download_and_parse_build).
379     def _get_timestamp(self, msg):
380         """Called when extraction of timestamp is required.
382         :param msg: Message to process.
387         self._timestamp = msg.timestamp[:14]
388         self._data[u"metadata"][u"generated"] = self._timestamp
389         self._msg_type = None
# Appends per-DUT VAT command history to the current test's "conf-history",
# counting DUTs via self._conf_history_lookup_nr.
391     def _get_vat_history(self, msg):
392         """Called when extraction of VAT command history is required.
394         TODO: Remove when not needed.
396         :param msg: Message to process.
400         if msg.message.count(u"VAT command history:"):
401             self._conf_history_lookup_nr += 1
402             if self._conf_history_lookup_nr == 1:
403                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
405                 self._msg_type = None
406             text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
407                           r"VAT command history:", u"",
408                           msg.message, count=1).replace(u'\n', u' |br| ').\
411             self._data[u"tests"][self._test_id][u"conf-history"] += (
412                 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
# PAPI twin of _get_vat_history; shares the same counter and output field.
415     def _get_papi_history(self, msg):
416         """Called when extraction of PAPI command history is required.
418         :param msg: Message to process.
422         if msg.message.count(u"PAPI command history:"):
423             self._conf_history_lookup_nr += 1
424             if self._conf_history_lookup_nr == 1:
425                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
427                 self._msg_type = None
428             text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
429                           r"PAPI command history:", u"",
430                           msg.message, count=1).replace(u'\n', u' |br| ').\
432             self._data[u"tests"][self._test_id][u"conf-history"] += (
433                 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
# NOTE(review): renders VPP "show runtime" JSON into a prettytable text block
# appended to the test's "show-run" field.  Heavy parsing; several lines are
# elided in this view, so the JSON-cleanup chain is only partially visible.
436     def _get_show_run(self, msg):
437         """Called when extraction of VPP operational data (output of CLI command
438         Show Runtime) is required.
440         :param msg: Message to process.
445         if u"show-run" not in self._data[u"tests"][self._test_id].keys():
446             self._data[u"tests"][self._test_id][u"show-run"] = str()
448         if msg.message.count(u"stats runtime") or \
449                 msg.message.count(u"Runtime"):
# host/socket come from REGEX_TC_PAPI_CLI groups 1 and 2; fall back to
# previously stored text when the regex does not match.
451                 host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
453             except (AttributeError, IndexError):
454                 host = self._data[u"tests"][self._test_id][u"show-run"].\
457                 socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
459                 socket = f"/{socket}"
460             except (AttributeError, IndexError):
# Normalise the embedded JSON (bytes/unicode literal prefixes) before loads().
467                 replace(u'b"', u'"').
468                 replace(u'u"', u'"').
# One column of clocks per thread; thread 0 is vpp_main, the rest vpp_wk_N.
472                 threads_nr = len(runtime[0][u"clocks"])
473             except (IndexError, KeyError):
483             table = [[tbl_hdr, ] for _ in range(threads_nr)]
485                 for idx in range(threads_nr):
486                     name = format(item[u"name"])
487                     calls = format(item[u"calls"][idx])
488                     vectors = format(item[u"vectors"][idx])
489                     suspends = format(item[u"suspends"][idx])
# Clocks-per-unit: prefer vectors, then calls, then suspends as denominator.
490                     if item[u"vectors"][idx] > 0:
492                             item[u"clocks"][idx]/item[u"vectors"][idx], u".2e")
493                     elif item[u"calls"][idx] > 0:
495                             item[u"clocks"][idx]/item[u"calls"][idx], u".2e")
496                     elif item[u"suspends"][idx] > 0:
498                             item[u"clocks"][idx]/item[u"suspends"][idx], u".2e")
501                     if item[u"calls"][idx] > 0:
502                         vectors_call = format(
503                             item[u"vectors"][idx]/item[u"calls"][idx], u".2f")
505                         vectors_call = format(0, u".2f")
# Skip all-zero rows.
506                     if int(calls) + int(vectors) + int(suspends):
508                             name, calls, vectors, suspends, clocks, vectors_call
511             for idx in range(threads_nr):
512                 text += f"Thread {idx} "
513                 text += u"vpp_main\n" if idx == 0 else f"vpp_wk_{idx-1}\n"
515                 for row in table[idx]:
516                     if txt_table is None:
517                         txt_table = prettytable.PrettyTable(row)
520                         txt_table.add_row(row)
521                 txt_table.set_style(prettytable.MSWORD_FRIENDLY)
522                 txt_table.align[u"Name"] = u"l"
523                 txt_table.align[u"Calls"] = u"r"
524                 txt_table.align[u"Vectors"] = u"r"
525                 txt_table.align[u"Suspends"] = u"r"
526                 txt_table.align[u"Clocks"] = u"r"
527                 txt_table.align[u"Vectors/Calls"] = u"r"
529                 text += txt_table.get_string(sortby=u"Name") + u'\n'
# reST-friendly line breaks (" |br| "), matching the history formatters above.
530             text = f"\n**DUT: {host}{socket}**\n{text}".\
531                 replace(u'\n', u' |br| ').\
532                 replace(u'\r', u'').\
534             self._data[u"tests"][self._test_id][u"show-run"] += text
# NOTE(review): result-message parsers; each returns (parsed dict, status)
# where the dict keeps -1.0 sentinels and status stays "FAIL" (presumably the
# default set on elided lines) unless parsing succeeds.
536     def _get_ndrpdr_throughput(self, msg):
537         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
540         :param msg: The test message to be parsed.
542         :returns: Parsed data as a dict and the status (PASS/FAIL).
543         :rtype: tuple(dict, str)
547             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
548             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
551         groups = re.search(self.REGEX_NDRPDR_RATE, msg)
553         if groups is not None:
555                 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
556                 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
557                 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
558                 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
560             except (IndexError, ValueError):
563         return throughput, status
# SOAK tests: PLRsearch lower/upper bounds only.
565     def _get_plr_throughput(self, msg):
566         """Get PLRsearch lower bound and PLRsearch upper bound from the test
569         :param msg: The test message to be parsed.
571         :returns: Parsed data as a dict and the status (PASS/FAIL).
572         :rtype: tuple(dict, str)
580         groups = re.search(self.REGEX_PLR_RATE, msg)
582         if groups is not None:
584                 throughput[u"LOWER"] = float(groups.group(1))
585                 throughput[u"UPPER"] = float(groups.group(2))
587             except (IndexError, ValueError):
590         return throughput, status
# Latency: NDR/PDR x direction1/direction2, each min/avg/max (+ optional hdrh).
592     def _get_ndrpdr_latency(self, msg):
593         """Get LATENCY from the test message.
595         :param msg: The test message to be parsed.
597         :returns: Parsed data as a dict and the status (PASS/FAIL).
598         :rtype: tuple(dict, str)
608             u"direction1": copy.copy(latency_default),
609             u"direction2": copy.copy(latency_default)
612             u"direction1": copy.copy(latency_default),
613             u"direction2": copy.copy(latency_default)
617         groups = re.search(self.REGEX_NDRPDR_LAT, msg)
# Local helper: "min/avg/max[/hdrh]" string -> dict of floats (+ hdrh str).
619         def process_latency(in_str):
620             """Return object with parsed latency values.
622             TODO: Define class for the return type.
624             :param in_str: Input string, min/avg/max/hdrh format.
626             :returns: Dict with corresponding keys, except hdrh float values.
628             :throws IndexError: If in_str does not have enough substrings.
629             :throws ValueError: If a substring does not convert to float.
631             in_list = in_str.split('/', 3)
634                 u"min": float(in_list[0]),
635                 u"avg": float(in_list[1]),
636                 u"max": float(in_list[2]),
640             if len(in_list) == 4:
641                 rval[u"hdrh"] = str(in_list[3])
645         if groups is not None:
647                 latency[u"NDR"][u"direction1"] = \
648                     process_latency(groups.group(1))
649                 latency[u"NDR"][u"direction2"] = \
650                     process_latency(groups.group(2))
651                 latency[u"PDR"][u"direction1"] = \
652                     process_latency(groups.group(3))
653                 latency[u"PDR"][u"direction2"] = \
654                     process_latency(groups.group(4))
656             except (IndexError, ValueError):
659         return latency, status
# NOTE(review): ResultVisitor suite traversal — visit_suite recurses into
# child suites and tests; start_suite records the suite entry in
# self._data["suites"] keyed by its lowercased, underscored long name.
661     def visit_suite(self, suite):
662         """Implements traversing through the suite and its direct children.
664         :param suite: Suite to process.
668         if self.start_suite(suite) is not False:
669             suite.suites.visit(self)
670             suite.tests.visit(self)
671             self.end_suite(suite)
673     def start_suite(self, suite):
674         """Called when suite starts.
676         :param suite: Suite to process.
682             parent_name = suite.parent.name
683         except AttributeError:
# Doc text is normalised for reST output: quotes, newlines, '*' emphasis and
# the " |br| " break markers used throughout this module.
686         doc_str = suite.doc.\
687             replace(u'"', u"'").\
688             replace(u'\n', u' ').\
689             replace(u'\r', u'').\
690             replace(u'*[', u' |br| *[').\
691             replace(u"*", u"**").\
692             replace(u' |br| *[', u'*[', 1)
694         self._data[u"suites"][suite.longname.lower().
696                               replace(u" ", u"_")] = {
697                                   u"name": suite.name.lower(),
699                                   u"parent": parent_name,
700                                   u"level": len(suite.longname.split(u"."))
# Visit suite-level keywords (setup/teardown) too.
703             suite.keywords.visit(self)
705     def end_suite(self, suite):
706         """Called when suite ends.
708         :param suite: Suite to process.
713     def visit_test(self, test):
714         """Implements traversing through the test.
716         :param test: Test to process.
720         if self.start_test(test) is not False:
721             test.keywords.visit(self)
# NOTE(review): the core per-test handler — applies ignore list and name
# mapping, normalises the test id/name, then dispatches on tags
# (NDRPDR/SOAK/TCP/MRR/BMRR/RECONF) to fill test_result and store it in
# self._data["tests"][self._test_id].  Many lines elided in this view.
724     def start_test(self, test):
725         """Called when test starts.
727         :param test: Test to process.
732         longname_orig = test.longname.lower()
734         # Check the ignore list
735         if longname_orig in self._ignore:
738         tags = [str(tag) for tag in test.tags]
741         # Change the TC long name and name if defined in the mapping table
742         longname = self._mapping.get(longname_orig, None)
743         if longname is not None:
744             name = longname.split(u'.')[-1]
746                 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
750             longname = longname_orig
751             name = test.name.lower()
753         # Remove TC number from the TC long name (backward compatibility):
754         self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
755         # Remove TC number from the TC name (not needed):
756         test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
758         test_result[u"parent"] = test.parent.name.lower()
759         test_result[u"tags"] = tags
760         test_result["doc"] = test.doc.\
761             replace(u'"', u"'").\
762             replace(u'\n', u' ').\
763             replace(u'\r', u'').\
764             replace(u'[', u' |br| [').\
765             replace(u' |br| [', u'[', 1)
766         test_result[u"msg"] = test.message.\
767             replace(u'\n', u' |br| ').\
768             replace(u'\r', u'').\
770         test_result[u"type"] = u"FUNC"
771         test_result[u"status"] = test.status
773         if u"PERFTEST" in tags:
774             # Replace info about cores (e.g. -1c-) with the info about threads
775             # and cores (e.g. -1t1c-) in the long test case names and in the
776             # test case names if necessary.
777             groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
781                 for tag in test_result[u"tags"]:
782                     groups = re.search(self.REGEX_TC_TAG, tag)
788                     self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
789                                            f"-{tag_tc.lower()}-",
792                     test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
793                                                   f"-{tag_tc.lower()}-",
# Exactly one thread/core tag is required; otherwise the test is failed here.
797                     test_result[u"status"] = u"FAIL"
798                     self._data[u"tests"][self._test_id] = test_result
800                         f"The test {self._test_id} has no or more than one "
801                         f"multi-threading tags.\n"
802                         f"Tags: {test_result[u'tags']}"
# Result extraction only for passing tests; each branch sets "type" and
# delegates to the matching _get_* parser.
806         if test.status == u"PASS":
807             if u"NDRPDR" in tags:
808                 test_result[u"type"] = u"NDRPDR"
809                 test_result[u"throughput"], test_result[u"status"] = \
810                     self._get_ndrpdr_throughput(test.message)
811                 test_result[u"latency"], test_result[u"status"] = \
812                     self._get_ndrpdr_latency(test.message)
813             elif u"SOAK" in tags:
814                 test_result[u"type"] = u"SOAK"
815                 test_result[u"throughput"], test_result[u"status"] = \
816                     self._get_plr_throughput(test.message)
818                 test_result[u"type"] = u"TCP"
819                 groups = re.search(self.REGEX_TCP, test.message)
820                 test_result[u"result"] = int(groups.group(2))
821             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
823                     test_result[u"type"] = u"MRR"
825                     test_result[u"type"] = u"BMRR"
827                 test_result[u"result"] = dict()
828                 groups = re.search(self.REGEX_BMRR, test.message)
829                 if groups is not None:
830                     items_str = groups.group(1)
831                     items_float = [float(item.strip()) for item
832                                    in items_str.split(",")]
833                     # Use whole list in CSIT-1180.
834                     stats = jumpavg.AvgStdevStats.for_runs(items_float)
835                     test_result[u"result"][u"receive-rate"] = stats.avg
# Fallback to the legacy single-trial MRR message: rx / duration.
837                     groups = re.search(self.REGEX_MRR, test.message)
838                     test_result[u"result"][u"receive-rate"] = \
839                         float(groups.group(3)) / float(groups.group(1))
840             elif u"RECONF" in tags:
841                 test_result[u"type"] = u"RECONF"
842                 test_result[u"result"] = None
844                     grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
845                     grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
846                     test_result[u"result"] = {
847                         u"loss": int(grps_loss.group(1)),
848                         u"time": float(grps_time.group(1))
850                 except (AttributeError, IndexError, ValueError, TypeError):
851                     test_result[u"status"] = u"FAIL"
853                 test_result[u"status"] = u"FAIL"
854                 self._data[u"tests"][self._test_id] = test_result
857         self._data[u"tests"][self._test_id] = test_result
# NOTE(review): keyword-level dispatch — start_keyword routes setup/teardown/
# test keywords to the dedicated visitors; end_test/end_keyword are no-ops.
859     def end_test(self, test):
860         """Called when test ends.
862         :param test: Test to process.
867     def visit_keyword(self, keyword):
868         """Implements traversing through the keyword and its child keywords.
870         :param keyword: Keyword to process.
871         :type keyword: Keyword
874         if self.start_keyword(keyword) is not False:
875             self.end_keyword(keyword)
877     def start_keyword(self, keyword):
878         """Called when keyword starts. Default implementation does nothing.
880         :param keyword: Keyword to process.
881         :type keyword: Keyword
885             if keyword.type == u"setup":
886                 self.visit_setup_kw(keyword)
887             elif keyword.type == u"teardown":
888                 self._lookup_kw_nr = 0
889                 self.visit_teardown_kw(keyword)
891                 self._lookup_kw_nr = 0
892                 self.visit_test_kw(keyword)
893         except AttributeError:
896     def end_keyword(self, keyword):
897         """Called when keyword ends. Default implementation does nothing.
899         :param keyword: Keyword to process.
900         :type keyword: Keyword
# NOTE(review): test/setup keyword visitors — recurse through child keywords
# and set self._msg_type so the matching parser fires on the keyword's
# messages (see parse_msg dispatch in __init__).
904     def visit_test_kw(self, test_kw):
905         """Implements traversing through the test keyword and its child
908         :param test_kw: Keyword to process.
909         :type test_kw: Keyword
912         for keyword in test_kw.keywords:
913             if self.start_test_kw(keyword) is not False:
914                 self.visit_test_kw(keyword)
915                 self.end_test_kw(keyword)
917     def start_test_kw(self, test_kw):
918         """Called when test keyword starts. Default implementation does
921         :param test_kw: Keyword to process.
922         :type test_kw: Keyword
925         if test_kw.name.count(u"Show Runtime On All Duts") or \
926                 test_kw.name.count(u"Show Runtime Counters On All Duts"):
927             self._lookup_kw_nr += 1
928             self._show_run_lookup_nr = 0
929             self._msg_type = u"test-show-runtime"
# DPDK version is only read once (guarded by self._version).
930         elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
931             self._msg_type = u"dpdk-version"
934             test_kw.messages.visit(self)
936     def end_test_kw(self, test_kw):
937         """Called when keyword ends. Default implementation does nothing.
939         :param test_kw: Keyword to process.
940         :type test_kw: Keyword
944     def visit_setup_kw(self, setup_kw):
945         """Implements traversing through the teardown keyword and its child
948         :param setup_kw: Keyword to process.
949         :type setup_kw: Keyword
952         for keyword in setup_kw.keywords:
953             if self.start_setup_kw(keyword) is not False:
954                 self.visit_setup_kw(keyword)
955                 self.end_setup_kw(keyword)
957     def start_setup_kw(self, setup_kw):
958         """Called when teardown keyword starts. Default implementation does
961         :param setup_kw: Keyword to process.
962         :type setup_kw: Keyword
# Each metadata item is extracted at most once (version/timestamp/testbed
# guards below).
965         if setup_kw.name.count(u"Show Vpp Version On All Duts") \
966                 and not self._version:
967             self._msg_type = u"vpp-version"
968         elif setup_kw.name.count(u"Set Global Variable") \
969                 and not self._timestamp:
970             self._msg_type = u"timestamp"
971         elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
972             self._msg_type = u"testbed"
975             setup_kw.messages.visit(self)
977     def end_setup_kw(self, setup_kw):
978         """Called when keyword ends. Default implementation does nothing.
980         :param setup_kw: Keyword to process.
981         :type setup_kw: Keyword
# NOTE(review): teardown keyword visitors (VAT/PAPI history) and the generic
# message hook that finally invokes the parser selected via self._msg_type.
985     def visit_teardown_kw(self, teardown_kw):
986         """Implements traversing through the teardown keyword and its child
989         :param teardown_kw: Keyword to process.
990         :type teardown_kw: Keyword
993         for keyword in teardown_kw.keywords:
994             if self.start_teardown_kw(keyword) is not False:
995                 self.visit_teardown_kw(keyword)
996                 self.end_teardown_kw(keyword)
998     def start_teardown_kw(self, teardown_kw):
999         """Called when teardown keyword starts
1001         :param teardown_kw: Keyword to process.
1002         :type teardown_kw: Keyword
1006         if teardown_kw.name.count(u"Show Vat History On All Duts"):
1007             # TODO: Remove when not needed:
# Reset the per-test DUT counter before parsing a new history block.
1008             self._conf_history_lookup_nr = 0
1009             self._msg_type = u"teardown-vat-history"
1010             teardown_kw.messages.visit(self)
1011         elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1012             self._conf_history_lookup_nr = 0
1013             self._msg_type = u"teardown-papi-history"
1014             teardown_kw.messages.visit(self)
1016     def end_teardown_kw(self, teardown_kw):
1017         """Called when keyword ends. Default implementation does nothing.
1019         :param teardown_kw: Keyword to process.
1020         :type teardown_kw: Keyword
1024     def visit_message(self, msg):
1025         """Implements visiting the message.
1027         :param msg: Message to process.
1031         if self.start_message(msg) is not False:
1032             self.end_message(msg)
1034     def start_message(self, msg):
1035         """Called when message starts. Get required information from messages:
1038         :param msg: Message to process.
# Dispatch through the parse_msg table built in __init__ (keyed by
# self._msg_type).
1044             self.parse_msg[self._msg_type](msg)
1046     def end_message(self, msg):
1047         """Called when message ends. Default implementation does nothing.
1049         :param msg: Message to process.
1058 The data is extracted from output.xml files generated by Jenkins jobs and
1059 stored in pandas' DataFrames.
1065 (as described in ExecutionChecker documentation)
1067 (as described in ExecutionChecker documentation)
1069 (as described in ExecutionChecker documentation)
# NOTE(review): InputData holds parsed builds in a nested pandas.Series
# (job -> build -> {metadata, suites, tests}); the getters below index into
# that structure.
1072     def __init__(self, spec):
1075         :param spec: Specification.
1076         :type spec: Specification
# Accumulator filled by download_and_parse_data().
1083         self._input_data = pd.Series()
1087         """Getter - Input data.
1089         :returns: Input data
1090         :rtype: pandas.Series
1092         return self._input_data
1094     def metadata(self, job, build):
1095         """Getter - metadata
1097         :param job: Job which metadata we want.
1098         :param build: Build which metadata we want.
1102         :rtype: pandas.Series
1105         return self.data[job][build][u"metadata"]
# NOTE(review): suites() coerces build to str but metadata()/tests() do not —
# presumably callers pass str builds there; verify before unifying.
1107     def suites(self, job, build):
1110         :param job: Job which suites we want.
1111         :param build: Build which suites we want.
1115         :rtype: pandas.Series
1118         return self.data[job][str(build)][u"suites"]
1120     def tests(self, job, build):
1123         :param job: Job which tests we want.
1124         :param build: Build which tests we want.
1128         :rtype: pandas.Series
1131         return self.data[job][build][u"tests"]
# NOTE(review): _parse_tests runs ExecutionChecker over one downloaded
# output.xml; _download_and_parse_build wraps download -> parse -> cleanup and
# returns a result dict consumed by download_and_parse_data.  Logging is
# buffered as (severity, message) tuples and flushed at the end.
1133     def _parse_tests(self, job, build, log):
1134         """Process data from robot output.xml file and return JSON structured
1137         :param job: The name of job which build output data will be processed.
1138         :param build: The build which output data will be processed.
1139         :param log: List of log messages.
1142         :type log: list of tuples (severity, msg)
1143         :returns: JSON data structure.
1152         with open(build[u"file-name"], u'r') as data_file:
1154                 result = ExecutionResult(data_file)
1155             except errors.DataError as err:
1157                     (u"ERROR", f"Error occurred while parsing output.xml: "
1161         checker = ExecutionChecker(metadata, self._cfg.mapping,
1163         result.visit(checker)
1167     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1168         """Download and parse the input data file.
1170         :param pid: PID of the process executing this method.
1171         :param job: Name of the Jenkins job which generated the processed input
1173         :param build: Information about the Jenkins build which generated the
1174             processed input file.
1175         :param repeat: Repeat the download specified number of times if not
1186             (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
1194         success = download_and_unzip_data_file(self._cfg, job, build, pid,
1202                 f"It is not possible to download the input data file from the "
1203                 f"job {job}, build {build[u'build']}, or it is damaged. "
1209                 f"    Processing data from the build {build[u'build']} ...")
1211             data = self._parse_tests(job, build, logs)
1215                     f"Input data file from the job {job}, build "
1216                     f"{build[u'build']} is damaged. Skipped.")
1219                 state = u"processed"
# Best-effort removal of the downloaded file; failure is only logged.
1222                 remove(build[u"file-name"])
1223             except OSError as err:
1225                 ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1229         # If the time-period is defined in the specification file, remove all
1230         # files which are outside the time period.
1231         timeperiod = self._cfg.input.get(u"time-period", None)
1232         if timeperiod and data:
1234             timeperiod = timedelta(int(timeperiod))
1235             metadata = data.get(u"metadata", None)
1237                 generated = metadata.get(u"generated", None)
# Must match the 14-char prefix stored by ExecutionChecker._get_timestamp.
1239                 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1240                 if (now - generated) > timeperiod:
1241                     # Remove the data and the file:
1246                         f"    The build {job}/{build[u'build']} is "
1247                         f"outdated, will be removed.")
1249             logs.append((u"INFO", u"  Done."))
# Flush buffered logs at their recorded severities.
1251         for level, line in logs:
1252             if level == u"INFO":
1254             elif level == u"ERROR":
1256             elif level == u"DEBUG":
1258             elif level == u"CRITICAL":
1259                 logging.critical(line)
1260             elif level == u"WARNING":
1261                 logging.warning(line)
1263         return {u"data": data, u"state": state, u"job": job, u"build": build}
# NOTE(review): top-level driver — iterates configured jobs/builds, parses
# each via _download_and_parse_build, and stores results as nested
# pandas.Series keyed by job then str(build number).
1265     def download_and_parse_data(self, repeat=1):
1266         """Download the input data files, parse input data from input files and
1267         store in pandas' Series.
1269         :param repeat: Repeat the download specified number of times if not
1274         logging.info(u"Downloading and parsing input files ...")
1276         for job, builds in self._cfg.builds.items():
1277             for build in builds:
1279                 result = self._download_and_parse_build(job, build, repeat)
1280                 build_nr = result[u"build"][u"build"]
1283                     data = result[u"data"]
1284                     build_data = pd.Series({
1285                         u"metadata": pd.Series(
1286                             list(data[u"metadata"].values()),
1287                             index=list(data[u"metadata"].keys())
1289                         u"suites": pd.Series(
1290                             list(data[u"suites"].values()),
1291                             index=list(data[u"suites"].keys())
1293                         u"tests": pd.Series(
1294                             list(data[u"tests"].values()),
1295                             index=list(data[u"tests"].keys())
# Lazily create the per-job Series on first build.
1299                     if self._input_data.get(job, None) is None:
1300                         self._input_data[job] = pd.Series()
1301                     self._input_data[job][str(build_nr)] = build_data
1303                     self._cfg.set_input_file_name(
1304                         job, build_nr, result[u"build"][u"file-name"])
1306                 self._cfg.set_input_state(job, build_nr, result[u"state"])
# ru_maxrss is in kB on Linux; divided by 1000 to report MB (approximate).
1309                     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1310                 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1312         logging.info(u"Done.")
# NOTE(review): helpers that turn a tag-filter expression (tags quoted with
# apostrophes) into an eval-able condition — each quoted tag gets " in tags"
# appended (see filter_data, which evals the result against a test's tags).
1315     def _end_of_tag(tag_filter, start=0, closer=u"'"):
1316         """Return the index of character in the string which is the end of tag.
1318         :param tag_filter: The string where the end of tag is being searched.
1319         :param start: The index where the searching is stated.
1320         :param closer: The character which is the tag closer.
1321         :type tag_filter: str
1324         :returns: The index of the tag closer.
# Find the opening quote from 'start', then return the index of its closer.
1329             idx_opener = tag_filter.index(closer, start)
1330             return tag_filter.index(closer, idx_opener + 1)
1335     def _condition(tag_filter):
1336         """Create a conditional statement from the given tag filter.
1338         :param tag_filter: Filter based on tags from the element specification.
1339         :type tag_filter: str
1340         :returns: Conditional statement which can be evaluated.
1346             index = InputData._end_of_tag(tag_filter, index)
1350             tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
# NOTE(review): filters tests/suites by a tag expression; the expression is
# compiled by _condition and evaluated with eval() against each test's tags —
# the filter string comes from the element specification (trusted input), not
# from end users, but keep that in mind when extending.
1352     def filter_data(self, element, params=None, data=None, data_set=u"tests",
1353                     continue_on_error=False):
1354         """Filter required data from the given jobs and builds.
1356         The output data structure is:
1360         - test (or suite) 1 ID:
1366         - test (or suite) n ID:
1373         :param element: Element which will use the filtered data.
1374         :param params: Parameters which will be included in the output. If None,
1375             all parameters are included.
1376         :param data: If not None, this data is used instead of data specified
1378         :param data_set: The set of data to be filtered: tests, suites,
1380         :param continue_on_error: Continue if there is error while reading the
1381             data. The Item will be empty then
1382         :type element: pandas.Series
1386         :type continue_on_error: bool
1387         :returns: Filtered data.
1388         :rtype pandas.Series
# "all"/"template" filters match everything; otherwise build the condition.
1392         if element[u"filter"] in (u"all", u"template"):
1395             cond = InputData._condition(element[u"filter"])
1396             logging.debug(f"   Filter: {cond}")
1398             logging.error(u"  No filter defined.")
1402             params = element.get(u"parameters", None)
# "type" is always needed downstream, so force-include it.
1404                 params.append(u"type")
1406         data_to_filter = data if data else element[u"data"]
1409             for job, builds in data_to_filter.items():
1410                 data[job] = pd.Series()
1411                 for build in builds:
1412                     data[job][str(build)] = pd.Series()
1415                             self.data[job][str(build)][data_set].items())
1417                         if continue_on_error:
1421                     for test_id, test_data in data_dict.items():
# eval() of the tag condition; 'tags' is the only name exposed.
1422                         if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1423                             data[job][str(build)][test_id] = pd.Series()
1425                                 for param, val in test_data.items():
1426                                     data[job][str(build)][test_id][param] = val
1428                                 for param in params:
1430                                         data[job][str(build)][test_id][param] =\
1433                                         data[job][str(build)][test_id][param] =\
1437         except (KeyError, IndexError, ValueError) as err:
1439                 f"Missing mandatory parameter in the element specification: "
1443         except AttributeError as err:
1444             logging.error(repr(err))
1446         except SyntaxError as err:
1448                 f"The filter {cond} is not correct. Check if all tags are "
1449                 f"enclosed by apostrophes.\n{repr(err)}"
# NOTE(review): name-based variant of filter_data — matches test IDs against
# the regexes listed in element["include"] instead of evaluating a tag
# expression; output structure is identical.
1453     def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1454                              continue_on_error=False):
1455         """Filter required data from the given jobs and builds.
1457         The output data structure is:
1461         - test (or suite) 1 ID:
1467         - test (or suite) n ID:
1474         :param element: Element which will use the filtered data.
1475         :param params: Parameters which will be included in the output. If None,
1476             all parameters are included.
1477         :param data_set: The set of data to be filtered: tests, suites,
1479         :param continue_on_error: Continue if there is error while reading the
1480             data. The Item will be empty then
1481         :type element: pandas.Series
1484         :type continue_on_error: bool
1485         :returns: Filtered data.
1486         :rtype pandas.Series
1489         include = element.get(u"include", None)
1491             logging.warning(u"No tests to include, skipping the element.")
1495             params = element.get(u"parameters", None)
1497                 params.append(u"type")
1501             for job, builds in element[u"data"].items():
1502                 data[job] = pd.Series()
1503                 for build in builds:
1504                     data[job][str(build)] = pd.Series()
1505                     for test in include:
# Each include entry is treated as a lowercase regex over test IDs.
1507                         reg_ex = re.compile(str(test).lower())
1508                         for test_id in self.data[job][
1509                                 str(build)][data_set].keys():
1510                             if re.match(reg_ex, str(test_id).lower()):
1511                                 test_data = self.data[job][
1512                                     str(build)][data_set][test_id]
1513                                 data[job][str(build)][test_id] = pd.Series()
1515                                     for param, val in test_data.items():
1516                                         data[job][str(build)][test_id]\
1519                                     for param in params:
1521                                             data[job][str(build)][
1525                                             data[job][str(build)][
1526                                                 test_id][param] = u"No Data"
1527                                         except KeyError as err:
1528                                             logging.error(repr(err))
1529                                             if continue_on_error:
1534         except (KeyError, IndexError, ValueError) as err:
1536                 f"Missing mandatory parameter in the element "
1537                 f"specification: {repr(err)}"
1540         except AttributeError as err:
1541             logging.error(repr(err))
# NOTE(review): flattens job -> build -> item nesting into a single Series
# keyed by item ID; later builds/jobs overwrite earlier entries with the same
# item ID (last one wins).
1545     def merge_data(data):
1546         """Merge data from more jobs and builds to a simple data structure.
1548         The output data structure is:
1550         - test (suite) 1 ID:
1556         - test (suite) n ID:
1559         :param data: Data to merge.
1560         :type data: pandas.Series
1561         :returns: Merged data.
1562         :rtype: pandas.Series
1565         logging.info(u"    Merging data ...")
1567         merged_data = pd.Series()
1568         for builds in data.values:
1569             for item in builds.values:
1570                 for item_id, item_data in item.items():
1571                     merged_data[item_id] = item_data