1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
36 from robot.api import ExecutionResult, ResultVisitor
37 from robot import errors
39 from resources.libraries.python import jumpavg
40 from input_data_files import download_and_unzip_data_file
43 # Separator used in file names
47 class ExecutionChecker(ResultVisitor):
48 """Class to traverse through the test suite structure.
50 The functionality implemented in this class generates a json structure:
56 "generated": "Timestamp",
57 "version": "SUT version",
58 "job": "Jenkins job name",
59 "build": "Information about the build"
62 "Suite long name 1": {
64 "doc": "Suite 1 documentation",
65 "parent": "Suite 1 parent",
66 "level": "Level of the suite in the suite hierarchy"
68 "Suite long name N": {
70 "doc": "Suite N documentation",
71 "parent": "Suite N parent",
72 "level": "Level of the suite in the suite hierarchy"
79 "parent": "Name of the parent of the test",
80 "doc": "Test documentation",
81 "msg": "Test message",
82 "conf-history": "DUT1 and DUT2 VAT History",
83 "show-run": "Show Run",
84 "tags": ["tag 1", "tag 2", "tag n"],
86 "status": "PASS" | "FAIL",
132 "parent": "Name of the parent of the test",
133 "doc": "Test documentation",
134 "msg": "Test message",
135 "tags": ["tag 1", "tag 2", "tag n"],
137 "status": "PASS" | "FAIL",
144 "parent": "Name of the parent of the test",
145 "doc": "Test documentation",
146 "msg": "Test message",
147 "tags": ["tag 1", "tag 2", "tag n"],
148 "type": "MRR" | "BMRR",
149 "status": "PASS" | "FAIL",
151 "receive-rate": float,
152 # Average of a list, computed using AvgStdevStats.
153 # In CSIT-1180, replace with List[float].
167 "metadata": { # Optional
168 "version": "VPP version",
169 "job": "Jenkins job name",
170 "build": "Information about the build"
174 "doc": "Suite 1 documentation",
175 "parent": "Suite 1 parent",
176 "level": "Level of the suite in the suite hierarchy"
179 "doc": "Suite N documentation",
180 "parent": "Suite N parent",
181 "level": "Level of the suite in the suite hierarchy"
187 "parent": "Name of the parent of the test",
188 "doc": "Test documentation"
189 "msg": "Test message"
190 "tags": ["tag 1", "tag 2", "tag n"],
191 "conf-history": "DUT1 and DUT2 VAT History"
192 "show-run": "Show Run"
193 "status": "PASS" | "FAIL"
201 .. note:: ID is the lowercase full path to the test.
204 REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
205 r'PLRsearch upper bound::?\s(\d+.\d+)')
# Groups 1-4: NDR lower/upper, PDR lower/upper rates from the test message.
207 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
208 r'NDR_UPPER:\s(\d+.\d+).*\n'
209 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
210 r'PDR_UPPER:\s(\d+.\d+)')
212 # TODO: Remove when not needed
213 REGEX_NDRPDR_LAT_BASE = re.compile(
214 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
215 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
# Newer latency format: 12 capture groups (see _get_ndrpdr_latency).
217 REGEX_NDRPDR_LAT = re.compile(
218 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
219 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
220 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
221 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
222 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
223 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
225 # TODO: Remove when not needed
# Long latency format: 18 capture groups (see _get_ndrpdr_latency).
226 REGEX_NDRPDR_LAT_LONG = re.compile(
227 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
228 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
229 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
230 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
231 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
232 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
233 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
234 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
235 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
238 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
241 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
242 r"VPP Version:\s*|VPP version:\s*)(.*)")
244 REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
246 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')
248 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
249 r'tx\s(\d*),\srx\s(\d*)')
251 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
252 r' in packets per second: \[(.*)\]')
254 REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
255 REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')
257 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
259 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
261 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
263 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
265 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
267 def __init__(self, metadata, mapping, ignore):
# Initializes per-run parser state and the message-type dispatch table
# (self.parse_msg maps self._msg_type values to parser methods below).
270 :param metadata: Key-value pairs to be included in "metadata" part of
272 :param mapping: Mapping of the old names of test cases to the new
274 :param ignore: List of TCs to be ignored.
280 # Type of message to parse out from the test messages
281 self._msg_type = None
287 self._timestamp = None
289 # Testbed. The testbed is identified by TG node IP address.
292 # Mapping of TCs long names
293 self._mapping = mapping
296 self._ignore = ignore
298 # Number of PAPI History messages found:
300 # 1 - PAPI History of DUT1
301 # 2 - PAPI History of DUT2
302 self._lookup_kw_nr = 0
303 self._conf_history_lookup_nr = 0
305 # Number of Show Running messages found
307 # 1 - Show run message found
308 self._show_run_lookup_nr = 0
310 # Test ID of currently processed test- the lowercase full path to the
314 # The main data structure
316 u"metadata": OrderedDict(),
317 u"suites": OrderedDict(),
318 u"tests": OrderedDict()
321 # Save the provided metadata
322 for key, val in metadata.items():
323 self._data[u"metadata"][key] = val
325 # Dictionary defining the methods used to parse different types of
328 u"timestamp": self._get_timestamp,
329 u"vpp-version": self._get_vpp_version,
330 u"dpdk-version": self._get_dpdk_version,
331 # TODO: Remove when not needed:
332 u"teardown-vat-history": self._get_vat_history,
333 u"teardown-papi-history": self._get_papi_history,
334 u"test-show-runtime": self._get_show_run,
335 u"testbed": self._get_testbed
340 """Getter - Data parsed from the XML file.
342 :returns: Data parsed from the XML file.
347 def _get_testbed(self, msg):
348 """Called when extraction of testbed IP is required.
349 The testbed is identified by TG node IP address.
351 :param msg: Message to process.
# Matches both TG-setup message variants; extracts the first dotted-quad
# found in the message and stores it in metadata under "testbed".
356 if msg.message.count(u"Setup of TG node") or \
357 msg.message.count(u"Setup of node TG host"):
358 reg_tg_ip = re.compile(
359 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
361 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
362 except (KeyError, ValueError, IndexError, AttributeError):
365 self._data[u"metadata"][u"testbed"] = self._testbed
366 self._msg_type = None
368 def _get_vpp_version(self, msg):
369 """Called when extraction of VPP version is required.
371 :param msg: Message to process.
# Recognizes three message prefixes; REGEX_VERSION_VPP group(2) carries
# the version string, stored in metadata["version"].
376 if msg.message.count(u"return STDOUT Version:") or \
377 msg.message.count(u"VPP Version:") or \
378 msg.message.count(u"VPP version:"):
379 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
381 self._data[u"metadata"][u"version"] = self._version
382 self._msg_type = None
384 def _get_dpdk_version(self, msg):
385 """Called when extraction of DPDK version is required.
387 :param msg: Message to process.
392 if msg.message.count(u"DPDK Version:"):
# REGEX_VERSION_DPDK group(2) is the version text after the prefix.
394 self._version = str(re.search(
395 self.REGEX_VERSION_DPDK, msg.message).group(2))
396 self._data[u"metadata"][u"version"] = self._version
400 self._msg_type = None
402 def _get_timestamp(self, msg):
403 """Called when extraction of timestamp is required.
405 :param msg: Message to process.
# Keeps only the first 14 characters of the robot message timestamp
# (date + hour:minute), stored in metadata["generated"].
410 self._timestamp = msg.timestamp[:14]
411 self._data[u"metadata"][u"generated"] = self._timestamp
412 self._msg_type = None
414 def _get_vat_history(self, msg):
415 """Called when extraction of VAT command history is required.
417 TODO: Remove when not needed.
419 :param msg: Message to process.
423 if msg.message.count(u"VAT command history:"):
# _conf_history_lookup_nr counts DUTs; reset the accumulated history
# string only for the first DUT of the teardown.
424 self._conf_history_lookup_nr += 1
425 if self._conf_history_lookup_nr == 1:
426 self._data[u"tests"][self._test_id][u"conf-history"] = str()
428 self._msg_type = None
429 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
430 r"VAT command history:", u"",
431 msg.message, count=1).replace(u'\n', u' |br| ').\
434 self._data[u"tests"][self._test_id][u"conf-history"] += (
435 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
438 def _get_papi_history(self, msg):
439 """Called when extraction of PAPI command history is required.
441 :param msg: Message to process.
445 if msg.message.count(u"PAPI command history:"):
# Mirrors _get_vat_history, differing only in the message prefix.
446 self._conf_history_lookup_nr += 1
447 if self._conf_history_lookup_nr == 1:
448 self._data[u"tests"][self._test_id][u"conf-history"] = str()
450 self._msg_type = None
451 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
452 r"PAPI command history:", u"",
453 msg.message, count=1).replace(u'\n', u' |br| ').\
455 self._data[u"tests"][self._test_id][u"conf-history"] += (
456 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
459 def _get_show_run(self, msg):
460 """Called when extraction of VPP operational data (output of CLI command
461 Show Runtime) is required.
463 :param msg: Message to process.
468 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
469 self._data[u"tests"][self._test_id][u"show-run"] = str()
471 if msg.message.count(u"stats runtime") or \
472 msg.message.count(u"Runtime"):
# host/socket come from REGEX_TC_PAPI_CLI; on failure, fall back to
# previously accumulated data / empty values.
474 host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
476 except (AttributeError, IndexError):
477 host = self._data[u"tests"][self._test_id][u"show-run"].\
480 socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
482 socket = f"/{socket}"
483 except (AttributeError, IndexError):
490 replace(u'b"', u'"').
491 replace(u'u"', u'"').
# Thread count is inferred from the first runtime item's clocks list.
495 threads_nr = len(runtime[0][u"clocks"])
496 except (IndexError, KeyError):
# One table per thread, each starting with the header row.
506 table = [[tbl_hdr, ] for _ in range(threads_nr)]
508 for idx in range(threads_nr):
509 name = format(item[u"name"])
510 calls = format(item[u"calls"][idx])
511 vectors = format(item[u"vectors"][idx])
512 suspends = format(item[u"suspends"][idx])
# Clocks are normalized per vector, else per call, else per suspend.
513 if item[u"vectors"][idx] > 0:
515 item[u"clocks"][idx]/item[u"vectors"][idx], u".2e")
516 elif item[u"calls"][idx] > 0:
518 item[u"clocks"][idx]/item[u"calls"][idx], u".2e")
519 elif item[u"suspends"][idx] > 0:
521 item[u"clocks"][idx]/item[u"suspends"][idx], u".2e")
524 if item[u"calls"][idx] > 0:
525 vectors_call = format(
526 item[u"vectors"][idx]/item[u"calls"][idx], u".2f")
528 vectors_call = format(0, u".2f")
# Skip all-zero rows.
529 if int(calls) + int(vectors) + int(suspends):
531 name, calls, vectors, suspends, clocks, vectors_call
# Render one PrettyTable per thread; thread 0 is vpp_main, the rest
# are workers vpp_wk_<idx-1>.
534 for idx in range(threads_nr):
535 text += f"Thread {idx} "
536 text += u"vpp_main\n" if idx == 0 else f"vpp_wk_{idx-1}\n"
538 for row in table[idx]:
539 if txt_table is None:
540 txt_table = prettytable.PrettyTable(row)
543 txt_table.add_row(row)
544 txt_table.set_style(prettytable.MSWORD_FRIENDLY)
545 txt_table.align[u"Name"] = u"l"
546 txt_table.align[u"Calls"] = u"r"
547 txt_table.align[u"Vectors"] = u"r"
548 txt_table.align[u"Suspends"] = u"r"
549 txt_table.align[u"Clocks"] = u"r"
550 txt_table.align[u"Vectors/Calls"] = u"r"
552 text += txt_table.get_string(sortby=u"Name") + u'\n'
553 text = f"\n**DUT: {host}{socket}**\n{text}".\
554 replace(u'\n', u' |br| ').\
555 replace(u'\r', u'').\
557 self._data[u"tests"][self._test_id][u"show-run"] += text
559 def _get_ndrpdr_throughput(self, msg):
560 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
563 :param msg: The test message to be parsed.
565 :returns: Parsed data as a dict and the status (PASS/FAIL).
566 :rtype: tuple(dict, str)
# Defaults of -1.0 signal "not parsed"; status presumably starts FAIL
# and flips to PASS on successful parse — confirm against missing lines.
570 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
571 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
574 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
576 if groups is not None:
578 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
579 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
580 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
581 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
583 except (IndexError, ValueError):
586 return throughput, status
588 def _get_plr_throughput(self, msg):
589 """Get PLRsearch lower bound and PLRsearch upper bound from the test
592 :param msg: The test message to be parsed.
594 :returns: Parsed data as a dict and the status (PASS/FAIL).
595 :rtype: tuple(dict, str)
# REGEX_PLR_RATE group(1)/group(2) are the PLRsearch lower/upper bounds.
603 groups = re.search(self.REGEX_PLR_RATE, msg)
605 if groups is not None:
607 throughput[u"LOWER"] = float(groups.group(1))
608 throughput[u"UPPER"] = float(groups.group(2))
610 except (IndexError, ValueError):
613 return throughput, status
615 def _get_ndrpdr_latency(self, msg):
616 """Get LATENCY from the test message.
618 :param msg: The test message to be parsed.
620 :returns: Parsed data as a dict and the status (PASS/FAIL).
621 :rtype: tuple(dict, str)
# Pre-populate every key with copies of latency_default so a partial
# parse still yields a complete structure.
631 u"direction1": copy.copy(latency_default),
632 u"direction2": copy.copy(latency_default)
635 u"direction1": copy.copy(latency_default),
636 u"direction2": copy.copy(latency_default)
639 u"direction1": copy.copy(latency_default),
640 u"direction2": copy.copy(latency_default)
643 u"direction1": copy.copy(latency_default),
644 u"direction2": copy.copy(latency_default)
647 u"direction1": copy.copy(latency_default),
648 u"direction2": copy.copy(latency_default)
651 u"direction1": copy.copy(latency_default),
652 u"direction2": copy.copy(latency_default)
# Try the three message formats from newest (18 groups) to oldest
# (4 groups); the first regex that matches decides the layout below.
656 # TODO: Rewrite when long and base are not needed
657 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
659 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
661 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
663 return latency, u"FAIL"
665 def process_latency(in_str):
666 """Return object with parsed latency values.
668 TODO: Define class for the return type.
670 :param in_str: Input string, min/avg/max/hdrh format.
672 :returns: Dict with corresponding keys, except hdrh float values.
674 :throws IndexError: If in_str does not have enough substrings.
675 :throws ValueError: If a substring does not convert to float.
677 in_list = in_str.split('/', 3)
680 u"min": float(in_list[0]),
681 u"avg": float(in_list[1]),
682 u"max": float(in_list[2]),
# The optional 4th component is the raw hdrh histogram string.
686 if len(in_list) == 4:
687 rval[u"hdrh"] = str(in_list[3])
# Base format: groups 1-4 only (NDR/PDR, both directions).
692 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
693 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
694 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
695 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
696 if groups.lastindex == 4:
697 return latency, u"PASS"
698 except (IndexError, ValueError):
# 12-group format adds PDR90/PDR50/PDR10/LAT0 pairs.
702 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
703 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
704 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
705 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
706 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
707 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
708 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
709 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
710 if groups.lastindex == 12:
711 return latency, u"PASS"
712 except (IndexError, ValueError):
# 18-group (long) format additionally carries NDR10/NDR50/NDR90.
715 # TODO: Remove when not needed
716 latency[u"NDR10"] = {
717 u"direction1": copy.copy(latency_default),
718 u"direction2": copy.copy(latency_default)
720 latency[u"NDR50"] = {
721 u"direction1": copy.copy(latency_default),
722 u"direction2": copy.copy(latency_default)
724 latency[u"NDR90"] = {
725 u"direction1": copy.copy(latency_default),
726 u"direction2": copy.copy(latency_default)
729 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
730 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
731 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
732 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
733 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
734 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
735 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
736 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
737 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
738 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
739 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
740 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
741 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
742 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
743 return latency, u"PASS"
744 except (IndexError, ValueError):
747 return latency, u"FAIL"
749 def visit_suite(self, suite):
750 """Implements traversing through the suite and its direct children.
752 :param suite: Suite to process.
756 if self.start_suite(suite) is not False:
757 suite.suites.visit(self)
758 suite.tests.visit(self)
759 self.end_suite(suite)
761 def start_suite(self, suite):
762 """Called when suite starts.
764 :param suite: Suite to process.
# Suite with no parent (top level) hits the AttributeError branch.
770 parent_name = suite.parent.name
771 except AttributeError:
# Normalize the suite doc into the |br|-separated reST form used
# throughout this module.
774 doc_str = suite.doc.\
775 replace(u'"', u"'").\
776 replace(u'\n', u' ').\
777 replace(u'\r', u'').\
778 replace(u'*[', u' |br| *[').\
779 replace(u"*", u"**").\
780 replace(u' |br| *[', u'*[', 1)
# Suite key is the lowercase long name with spaces replaced by "_".
782 self._data[u"suites"][suite.longname.lower().
784 replace(u" ", u"_")] = {
785 u"name": suite.name.lower(),
787 u"parent": parent_name,
788 u"level": len(suite.longname.split(u"."))
791 suite.keywords.visit(self)
793 def end_suite(self, suite):
794 """Called when suite ends.
796 :param suite: Suite to process.
801 def visit_test(self, test):
802 """Implements traversing through the test.
804 :param test: Test to process.
808 if self.start_test(test) is not False:
809 test.keywords.visit(self)
812 def start_test(self, test):
813 """Called when test starts.
815 :param test: Test to process.
820 longname_orig = test.longname.lower()
822 # Check the ignore list
823 if longname_orig in self._ignore:
826 tags = [str(tag) for tag in test.tags]
829 # Change the TC long name and name if defined in the mapping table
830 longname = self._mapping.get(longname_orig, None)
831 if longname is not None:
832 name = longname.split(u'.')[-1]
834 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
838 longname = longname_orig
839 name = test.name.lower()
841 # Remove TC number from the TC long name (backward compatibility):
842 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
843 # Remove TC number from the TC name (not needed):
844 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
846 test_result[u"parent"] = test.parent.name.lower()
847 test_result[u"tags"] = tags
848 test_result["doc"] = test.doc.\
849 replace(u'"', u"'").\
850 replace(u'\n', u' ').\
851 replace(u'\r', u'').\
852 replace(u'[', u' |br| [').\
853 replace(u' |br| [', u'[', 1)
854 test_result[u"msg"] = test.message.\
855 replace(u'\n', u' |br| ').\
856 replace(u'\r', u'').\
858 test_result[u"type"] = u"FUNC"
859 test_result[u"status"] = test.status
861 if u"PERFTEST" in tags:
862 # Replace info about cores (e.g. -1c-) with the info about threads
863 # and cores (e.g. -1t1c-) in the long test case names and in the
864 # test case names if necessary.
865 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
# Derive the NtMc form from the test's threads/cores tag (REGEX_TC_TAG).
869 for tag in test_result[u"tags"]:
870 groups = re.search(self.REGEX_TC_TAG, tag)
876 self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
877 f"-{tag_tc.lower()}-",
880 test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
881 f"-{tag_tc.lower()}-",
# A perf test must carry exactly one threads/cores tag; otherwise FAIL.
885 test_result[u"status"] = u"FAIL"
886 self._data[u"tests"][self._test_id] = test_result
888 f"The test {self._test_id} has no or more than one "
889 f"multi-threading tags.\n"
890 f"Tags: {test_result[u'tags']}"
# Only passing tests are parsed for results; type is chosen from tags.
894 if test.status == u"PASS":
895 if u"NDRPDR" in tags:
896 test_result[u"type"] = u"NDRPDR"
897 test_result[u"throughput"], test_result[u"status"] = \
898 self._get_ndrpdr_throughput(test.message)
899 test_result[u"latency"], test_result[u"status"] = \
900 self._get_ndrpdr_latency(test.message)
901 elif u"SOAK" in tags:
902 test_result[u"type"] = u"SOAK"
903 test_result[u"throughput"], test_result[u"status"] = \
904 self._get_plr_throughput(test.message)
906 test_result[u"type"] = u"TCP"
907 groups = re.search(self.REGEX_TCP, test.message)
908 test_result[u"result"] = int(groups.group(2))
909 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
911 test_result[u"type"] = u"MRR"
913 test_result[u"type"] = u"BMRR"
915 test_result[u"result"] = dict()
916 groups = re.search(self.REGEX_BMRR, test.message)
917 if groups is not None:
918 items_str = groups.group(1)
919 items_float = [float(item.strip()) for item
920 in items_str.split(",")]
921 # Use whole list in CSIT-1180.
922 stats = jumpavg.AvgStdevStats.for_runs(items_float)
923 test_result[u"result"][u"receive-rate"] = stats.avg
# Legacy MRR format: rx count divided by trial duration.
925 groups = re.search(self.REGEX_MRR, test.message)
926 test_result[u"result"][u"receive-rate"] = \
927 float(groups.group(3)) / float(groups.group(1))
928 elif u"RECONF" in tags:
929 test_result[u"type"] = u"RECONF"
930 test_result[u"result"] = None
932 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
933 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
934 test_result[u"result"] = {
935 u"loss": int(grps_loss.group(1)),
936 u"time": float(grps_time.group(1))
938 except (AttributeError, IndexError, ValueError, TypeError):
939 test_result[u"status"] = u"FAIL"
941 test_result[u"status"] = u"FAIL"
942 self._data[u"tests"][self._test_id] = test_result
945 self._data[u"tests"][self._test_id] = test_result
947 def end_test(self, test):
948 """Called when test ends.
950 :param test: Test to process.
955 def visit_keyword(self, keyword):
956 """Implements traversing through the keyword and its child keywords.
958 :param keyword: Keyword to process.
959 :type keyword: Keyword
962 if self.start_keyword(keyword) is not False:
963 self.end_keyword(keyword)
965 def start_keyword(self, keyword):
966 """Called when keyword starts. Default implementation does nothing.
968 :param keyword: Keyword to process.
969 :type keyword: Keyword
# Dispatch on keyword type: setup / teardown / (presumably test) —
# the non-setup, non-teardown branch resets _lookup_kw_nr too.
973 if keyword.type == u"setup":
974 self.visit_setup_kw(keyword)
975 elif keyword.type == u"teardown":
976 self._lookup_kw_nr = 0
977 self.visit_teardown_kw(keyword)
979 self._lookup_kw_nr = 0
980 self.visit_test_kw(keyword)
981 except AttributeError:
984 def end_keyword(self, keyword):
985 """Called when keyword ends. Default implementation does nothing.
987 :param keyword: Keyword to process.
988 :type keyword: Keyword
992 def visit_test_kw(self, test_kw):
993 """Implements traversing through the test keyword and its child
996 :param test_kw: Keyword to process.
997 :type test_kw: Keyword
1000 for keyword in test_kw.keywords:
1001 if self.start_test_kw(keyword) is not False:
1002 self.visit_test_kw(keyword)
1003 self.end_test_kw(keyword)
1005 def start_test_kw(self, test_kw):
1006 """Called when test keyword starts. Default implementation does
1009 :param test_kw: Keyword to process.
1010 :type test_kw: Keyword
# Select which message parser runs on this keyword's messages.
1013 if test_kw.name.count(u"Show Runtime On All Duts") or \
1014 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1015 self._lookup_kw_nr += 1
1016 self._show_run_lookup_nr = 0
1017 self._msg_type = u"test-show-runtime"
1018 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1019 self._msg_type = u"dpdk-version"
1022 test_kw.messages.visit(self)
1024 def end_test_kw(self, test_kw):
1025 """Called when keyword ends. Default implementation does nothing.
1027 :param test_kw: Keyword to process.
1028 :type test_kw: Keyword
1032 def visit_setup_kw(self, setup_kw):
1033 """Implements traversing through the teardown keyword and its child
1036 :param setup_kw: Keyword to process.
1037 :type setup_kw: Keyword
1040 for keyword in setup_kw.keywords:
1041 if self.start_setup_kw(keyword) is not False:
1042 self.visit_setup_kw(keyword)
1043 self.end_setup_kw(keyword)
1045 def start_setup_kw(self, setup_kw):
1046 """Called when teardown keyword starts. Default implementation does
1049 :param setup_kw: Keyword to process.
1050 :type setup_kw: Keyword
# Each metadata value is extracted at most once ("and not self._...").
1053 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1054 and not self._version:
1055 self._msg_type = u"vpp-version"
1056 elif setup_kw.name.count(u"Set Global Variable") \
1057 and not self._timestamp:
1058 self._msg_type = u"timestamp"
1059 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1060 self._msg_type = u"testbed"
1063 setup_kw.messages.visit(self)
1065 def end_setup_kw(self, setup_kw):
1066 """Called when keyword ends. Default implementation does nothing.
1068 :param setup_kw: Keyword to process.
1069 :type setup_kw: Keyword
1073 def visit_teardown_kw(self, teardown_kw):
1074 """Implements traversing through the teardown keyword and its child
1077 :param teardown_kw: Keyword to process.
1078 :type teardown_kw: Keyword
1081 for keyword in teardown_kw.keywords:
1082 if self.start_teardown_kw(keyword) is not False:
1083 self.visit_teardown_kw(keyword)
1084 self.end_teardown_kw(keyword)
1086 def start_teardown_kw(self, teardown_kw):
1087 """Called when teardown keyword starts
1089 :param teardown_kw: Keyword to process.
1090 :type teardown_kw: Keyword
# VAT and PAPI history keywords both reset the per-test DUT counter
# before visiting messages; only one of the two appears per teardown.
1094 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1095 # TODO: Remove when not needed:
1096 self._conf_history_lookup_nr = 0
1097 self._msg_type = u"teardown-vat-history"
1098 teardown_kw.messages.visit(self)
1099 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1100 self._conf_history_lookup_nr = 0
1101 self._msg_type = u"teardown-papi-history"
1102 teardown_kw.messages.visit(self)
1104 def end_teardown_kw(self, teardown_kw):
1105 """Called when keyword ends. Default implementation does nothing.
1107 :param teardown_kw: Keyword to process.
1108 :type teardown_kw: Keyword
1112 def visit_message(self, msg):
1113 """Implements visiting the message.
1115 :param msg: Message to process.
1119 if self.start_message(msg) is not False:
1120 self.end_message(msg)
1122 def start_message(self, msg):
1123 """Called when message starts. Get required information from messages:
1126 :param msg: Message to process.
# Dispatch to the parser registered for the current _msg_type.
1132 self.parse_msg[self._msg_type](msg)
1134 def end_message(self, msg):
1135 """Called when message ends. Default implementation does nothing.
1137 :param msg: Message to process.
1146 The data is extracted from output.xml files generated by Jenkins jobs and
1147 stored in pandas' DataFrames.
1153 (as described in ExecutionChecker documentation)
1155 (as described in ExecutionChecker documentation)
1157 (as described in ExecutionChecker documentation)
1160 def __init__(self, spec):
1163 :param spec: Specification.
1164 :type spec: Specification
# Parsed builds are accumulated per job/build into this pandas Series.
1171 self._input_data = pd.Series()
1175 """Getter - Input data.
1177 :returns: Input data
1178 :rtype: pandas.Series
1180 return self._input_data
1182 def metadata(self, job, build):
1183 """Getter - metadata
1185 :param job: Job which metadata we want.
1186 :param build: Build which metadata we want.
1190 :rtype: pandas.Series
1193 return self.data[job][build][u"metadata"]
1195 def suites(self, job, build):
1198 :param job: Job which suites we want.
1199 :param build: Build which suites we want.
1203 :rtype: pandas.Series
# NOTE(review): build is str()-ed here but not in metadata()/tests();
# presumably builds are keyed by string — confirm callers pass str.
1206 return self.data[job][str(build)][u"suites"]
1208 def tests(self, job, build):
1211 :param job: Job which tests we want.
1212 :param build: Build which tests we want.
1216 :rtype: pandas.Series
1219 return self.data[job][build][u"tests"]
1221 def _parse_tests(self, job, build, log):
1222 """Process data from robot output.xml file and return JSON structured
1225 :param job: The name of job which build output data will be processed.
1226 :param build: The build which output data will be processed.
1227 :param log: List of log messages.
1230 :type log: list of tuples (severity, msg)
1231 :returns: JSON data structure.
# Parse the downloaded output.xml with the Robot Framework API and
# walk it with ExecutionChecker to build the data structure.
1240 with open(build[u"file-name"], u'r') as data_file:
1242 result = ExecutionResult(data_file)
1243 except errors.DataError as err:
1245 (u"ERROR", f"Error occurred while parsing output.xml: "
1249 checker = ExecutionChecker(metadata, self._cfg.mapping,
1251 result.visit(checker)
1255 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1256 """Download and parse the input data file.
1258 :param pid: PID of the process executing this method.
1259 :param job: Name of the Jenkins job which generated the processed input
1261 :param build: Information about the Jenkins build which generated the
1262 processed input file.
1263 :param repeat: Repeat the download specified number of times if not
1274 (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
1282 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1290 f"It is not possible to download the input data file from the "
1291 f"job {job}, build {build[u'build']}, or it is damaged. "
1297 f"  Processing data from the build {build[u'build']} ...")
1299 data = self._parse_tests(job, build, logs)
1303 f"Input data file from the job {job}, build "
1304 f"{build[u'build']} is damaged. Skipped.")
1307 state = u"processed"
# Best-effort cleanup of the downloaded file; failure is only logged.
1310 remove(build[u"file-name"])
1311 except OSError as err:
1313 ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1317 # If the time-period is defined in the specification file, remove all
1318 # files which are outside the time period.
1319 timeperiod = self._cfg.input.get(u"time-period", None)
1320 if timeperiod and data:
# time-period is interpreted as a number of days (timedelta(days)).
1322 timeperiod = timedelta(int(timeperiod))
1323 metadata = data.get(u"metadata", None)
1325 generated = metadata.get(u"generated", None)
1327 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1328 if (now - generated) > timeperiod:
1329 # Remove the data and the file:
1334 f"  The build {job}/{build[u'build']} is "
1335 f"outdated, will be removed.")
1337 logs.append((u"INFO", u"  Done."))
# Flush the accumulated (severity, message) log tuples through logging.
1339 for level, line in logs:
1340 if level == u"INFO":
1342 elif level == u"ERROR":
1344 elif level == u"DEBUG":
1346 elif level == u"CRITICAL":
1347 logging.critical(line)
1348 elif level == u"WARNING":
1349 logging.warning(line)
1351 return {u"data": data, u"state": state, u"job": job, u"build": build}
1353 def download_and_parse_data(self, repeat=1):
1354 """Download the input data files, parse input data from input files and
1355 store in pandas' Series.
1357 :param repeat: Repeat the download specified number of times if not
1362 logging.info(u"Downloading and parsing input files ...")
1364 for job, builds in self._cfg.builds.items():
1365 for build in builds:
1367 result = self._download_and_parse_build(job, build, repeat)
1368 build_nr = result[u"build"][u"build"]
# Wrap the parsed dicts into per-build pandas Series keyed by
# metadata / suites / tests.
1371 data = result[u"data"]
1372 build_data = pd.Series({
1373 u"metadata": pd.Series(
1374 list(data[u"metadata"].values()),
1375 index=list(data[u"metadata"].keys())
1377 u"suites": pd.Series(
1378 list(data[u"suites"].values()),
1379 index=list(data[u"suites"].keys())
1381 u"tests": pd.Series(
1382 list(data[u"tests"].values()),
1383 index=list(data[u"tests"].keys())
# Builds are stored under their string build number.
1387 if self._input_data.get(job, None) is None:
1388 self._input_data[job] = pd.Series()
1389 self._input_data[job][str(build_nr)] = build_data
1391 self._cfg.set_input_file_name(
1392 job, build_nr, result[u"build"][u"file-name"])
1394 self._cfg.set_input_state(job, build_nr, result[u"state"])
# ru_maxrss is reported in kB on Linux; /1000 approximates MB.
1397 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1398 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1400 logging.info(u"Done.")
# NOTE(review): elided extraction — a @staticmethod decorator and parts of the
# docstring/body (original lines 1402, 1405, 1410-1416, 1419+) are missing here.
1403 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1404 """Return the index of character in the string which is the end of tag.
1406 :param tag_filter: The string where the end of tag is being searched.
1407 :param start: The index where the searching is stated.
1408 :param closer: The character which is the tag closer.
1409 :type tag_filter: str
1412 :returns: The index of the tag closer.
# Locate the opening quote at/after `start`, then return the index of the
# matching closing quote. str.index raises ValueError when no match is
# found — presumably handled by elided surrounding lines (a try/except),
# TODO confirm against the full file.
1417 idx_opener = tag_filter.index(closer, start)
1418 return tag_filter.index(closer, idx_opener + 1)
# NOTE(review): elided extraction — the loop header around line 1434, the
# termination handling and the return statement are missing from this view.
1423 def _condition(tag_filter):
1424 """Create a conditional statement from the given tag filter.
1426 :param tag_filter: Filter based on tags from the element specification.
1427 :type tag_filter: str
1428 :returns: Conditional statement which can be evaluated.
# Advance `index` to the closing quote of the next quoted tag.
1434 index = InputData._end_of_tag(tag_filter, index)
# Splice " in tags" right after the closing quote, so that e.g.
# "'NDRPDR'" becomes "'NDRPDR' in tags" — evaluable against a tag set.
1438 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
# NOTE(review): elided extraction — else-branches, `try:` headers, returns and
# several statements (e.g. around original lines 1481-1482, 1495-1496,
# 1501-1504, 1506-1508, 1517-1524, 1526-1530, 1533, 1535, 1538-1540) are
# missing from this view; comments below cover only the visible lines.
1440 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1441 continue_on_error=False):
1442 """Filter required data from the given jobs and builds.
1444 The output data structure is:
1448 - test (or suite) 1 ID:
1454 - test (or suite) n ID:
1461 :param element: Element which will use the filtered data.
1462 :param params: Parameters which will be included in the output. If None,
1463 all parameters are included.
1464 :param data: If not None, this data is used instead of data specified
1466 :param data_set: The set of data to be filtered: tests, suites,
1468 :param continue_on_error: Continue if there is error while reading the
1469 data. The Item will be empty then
1470 :type element: pandas.Series
1474 :type continue_on_error: bool
1475 :returns: Filtered data.
1476 :rtype pandas.Series
# "all"/"template" filters match everything; otherwise compile the tag
# filter string into an evaluable condition via _condition().
1480 if element[u"filter"] in (u"all", u"template"):
1483 cond = InputData._condition(element[u"filter"])
1484 logging.debug(f" Filter: {cond}")
1486 logging.error(u" No filter defined.")
# Default parameter list comes from the element spec; "type" is always
# force-included.
1490 params = element.get(u"parameters", None)
1492 params.append(u"type")
# Explicit `data` argument overrides the element's own data selection.
1494 data_to_filter = data if data else element[u"data"]
1497 for job, builds in data_to_filter.items():
1498 data[job] = pd.Series()
1499 for build in builds:
1500 data[job][str(build)] = pd.Series()
1503 self.data[job][str(build)][data_set].items())
1505 if continue_on_error:
# SECURITY NOTE(review): `cond` comes from the element specification
# and is executed via eval() with the test's tags bound as `tags`.
# Acceptable only because the spec file is trusted input — do not feed
# user-supplied filter strings here.
1509 for test_id, test_data in data_dict.items():
1510 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1511 data[job][str(build)][test_id] = pd.Series()
# params is None => copy every parameter of the matching test.
1513 for param, val in test_data.items():
1514 data[job][str(build)][test_id][param] = val
# Otherwise copy only the requested parameters (fallback
# assignment on the elided lines in between).
1516 for param in params:
1518 data[job][str(build)][test_id][param] =\
1521 data[job][str(build)][test_id][param] =\
# Missing spec keys surface as KeyError/IndexError/ValueError; a bad
# filter string surfaces as SyntaxError from eval().
1525 except (KeyError, IndexError, ValueError) as err:
1527 f"Missing mandatory parameter in the element specification: "
1531 except AttributeError as err:
1532 logging.error(repr(err))
1534 except SyntaxError as err:
1536 f"The filter {cond} is not correct. Check if all tags are "
1537 f"enclosed by apostrophes.\n{repr(err)}"
# NOTE(review): elided extraction — `try:` headers, returns and several
# statements (e.g. around original lines 1576, 1578, 1580-1582, 1584,
# 1586-1588, 1594, 1602, 1605-1612, 1618-1621, 1623, 1626-1627, 1630-1631)
# are missing from this view; comments below cover only the visible lines.
1541 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1542 continue_on_error=False):
1543 """Filter required data from the given jobs and builds.
1545 The output data structure is:
1549 - test (or suite) 1 ID:
1555 - test (or suite) n ID:
1562 :param element: Element which will use the filtered data.
1563 :param params: Parameters which will be included in the output. If None,
1564 all parameters are included.
1565 :param data_set: The set of data to be filtered: tests, suites,
1567 :param continue_on_error: Continue if there is error while reading the
1568 data. The Item will be empty then
1569 :type element: pandas.Series
1572 :type continue_on_error: bool
1573 :returns: Filtered data.
1574 :rtype pandas.Series
# Selection is by test-name regex list ("include" key) instead of tags.
1577 include = element.get(u"include", None)
1579 logging.warning(u"No tests to include, skipping the element.")
1583 params = element.get(u"parameters", None)
# "type" is always force-included in the copied parameters.
1585 params.append(u"type")
1589 for job, builds in element[u"data"].items():
1590 data[job] = pd.Series()
1591 for build in builds:
1592 data[job][str(build)] = pd.Series()
1593 for test in include:
# Each include entry is treated as a case-insensitive regex,
# matched against the lowercased test ID.
# NOTE(review): compiling inside the build loop re-compiles the
# same pattern per build — harmless, but hoistable.
1595 reg_ex = re.compile(str(test).lower())
1596 for test_id in self.data[job][
1597 str(build)][data_set].keys():
1598 if re.match(reg_ex, str(test_id).lower()):
1599 test_data = self.data[job][
1600 str(build)][data_set][test_id]
1601 data[job][str(build)][test_id] = pd.Series()
# params is None => copy every parameter of the match.
1603 for param, val in test_data.items():
1604 data[job][str(build)][test_id]\
# Otherwise copy only the requested parameters; a
# missing one is stored as "No Data" (elided branch).
1607 for param in params:
1609 data[job][str(build)][
1613 data[job][str(build)][
1614 test_id][param] = u"No Data"
# Per-test KeyError is survivable when continue_on_error is set.
1615 except KeyError as err:
1616 logging.error(repr(err))
1617 if continue_on_error:
1622 except (KeyError, IndexError, ValueError) as err:
1624 f"Missing mandatory parameter in the element "
1625 f"specification: {repr(err)}"
1628 except AttributeError as err:
1629 logging.error(repr(err))
1633 def merge_data(data):
1634 """Merge data from more jobs and builds to a simple data structure.
1636 The output data structure is:
1638 - test (suite) 1 ID:
1644 - test (suite) n ID:
1647 :param data: Data to merge.
1648 :type data: pandas.Series
1649 :returns: Merged data.
1650 :rtype: pandas.Series
1653 logging.info(u" Merging data ...")
1655 merged_data = pd.Series()
1656 for builds in data.values:
1657 for item in builds.values:
1658 for item_id, item_data in item.items():
1659 merged_data[item_id] = item_data