1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in pandas' DataFrames,
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
38 from robot.api import ExecutionResult, ResultVisitor
39 from robot import errors
41 from resources.libraries.python import jumpavg
42 from input_data_files import download_and_unzip_data_file
45 # Separator used in file names
49 class ExecutionChecker(ResultVisitor):
50 """Class to traverse through the test suite structure.
52 The functionality implemented in this class generates a json structure:
58 "generated": "Timestamp",
59 "version": "SUT version",
60 "job": "Jenkins job name",
61 "build": "Information about the build"
64 "Suite long name 1": {
66 "doc": "Suite 1 documentation",
67 "parent": "Suite 1 parent",
68 "level": "Level of the suite in the suite hierarchy"
70 "Suite long name N": {
72 "doc": "Suite N documentation",
73 "parent": "Suite 2 parent",
74 "level": "Level of the suite in the suite hierarchy"
81 "parent": "Name of the parent of the test",
82 "doc": "Test documentation",
83 "msg": "Test message",
84 "conf-history": "DUT1 and DUT2 VAT History",
85 "show-run": "Show Run",
86 "tags": ["tag 1", "tag 2", "tag n"],
88 "status": "PASS" | "FAIL",
134 "parent": "Name of the parent of the test",
135 "doc": "Test documentation",
136 "msg": "Test message",
137 "tags": ["tag 1", "tag 2", "tag n"],
139 "status": "PASS" | "FAIL",
146 "parent": "Name of the parent of the test",
147 "doc": "Test documentation",
148 "msg": "Test message",
149 "tags": ["tag 1", "tag 2", "tag n"],
150 "type": "MRR" | "BMRR",
151 "status": "PASS" | "FAIL",
153 "receive-rate": float,
154 # Average of a list, computed using AvgStdevStats.
155 # In CSIT-1180, replace with List[float].
169 "metadata": { # Optional
170 "version": "VPP version",
171 "job": "Jenkins job name",
172 "build": "Information about the build"
176 "doc": "Suite 1 documentation",
177 "parent": "Suite 1 parent",
178 "level": "Level of the suite in the suite hierarchy"
181 "doc": "Suite N documentation",
182 "parent": "Suite 2 parent",
183 "level": "Level of the suite in the suite hierarchy"
189 "parent": "Name of the parent of the test",
190 "doc": "Test documentation"
191 "msg": "Test message"
192 "tags": ["tag 1", "tag 2", "tag n"],
193 "conf-history": "DUT1 and DUT2 VAT History"
194 "show-run": "Show Run"
195 "status": "PASS" | "FAIL"
203 .. note:: ID is the lowercase full path to the test.
206 REGEX_PLR_RATE = re.compile(
207 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
208 r'PLRsearch upper bound::?\s(\d+.\d+)'
210 REGEX_NDRPDR_RATE = re.compile(
211 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
212 r'NDR_UPPER:\s(\d+.\d+).*\n'
213 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
214 r'PDR_UPPER:\s(\d+.\d+)'
216 REGEX_PERF_MSG_INFO = re.compile(
217 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
218 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
219 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
220 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
221 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
223 REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
225 # TODO: Remove when not needed
226 REGEX_NDRPDR_LAT_BASE = re.compile(
227 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
228 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
230 REGEX_NDRPDR_LAT = re.compile(
231 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
232 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
233 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
234 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
235 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
236 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
238 # TODO: Remove when not needed
239 REGEX_NDRPDR_LAT_LONG = re.compile(
240 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
241 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
242 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
246 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
247 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
248 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
250 REGEX_VERSION_VPP = re.compile(
251 r"(return STDOUT Version:\s*|"
252 r"VPP Version:\s*|VPP version:\s*)(.*)"
254 REGEX_VERSION_DPDK = re.compile(
255 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
257 REGEX_TCP = re.compile(
258 r'Total\s(rps|cps|throughput):\s(\d*).*$'
260 REGEX_MRR = re.compile(
261 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
262 r'tx\s(\d*),\srx\s(\d*)'
264 REGEX_BMRR = re.compile(
265 r'Maximum Receive Rate trial results'
266 r' in packets per second: \[(.*)\]'
268 REGEX_RECONF_LOSS = re.compile(
269 r'Packets lost due to reconfig: (\d*)'
271 REGEX_RECONF_TIME = re.compile(
272 r'Implied time lost: (\d*.[\de-]*)'
274 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
276 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
278 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
280 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
282 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
284 def __init__(self, metadata, mapping, ignore):
287 :param metadata: Key-value pairs to be included in "metadata" part of
289 :param mapping: Mapping of the old names of test cases to the new
291 :param ignore: List of TCs to be ignored.
297 # Type of message to parse out from the test messages
298 self._msg_type = None
304 self._timestamp = None
306 # Testbed. The testbed is identified by TG node IP address.
309 # Mapping of TCs long names
310 self._mapping = mapping
313 self._ignore = ignore
315 # Number of PAPI History messages found:
317 # 1 - PAPI History of DUT1
318 # 2 - PAPI History of DUT2
319 self._conf_history_lookup_nr = 0
321 self._sh_run_counter = 0
323 # Test ID of currently processed test- the lowercase full path to the
327 # The main data structure
329 u"metadata": OrderedDict(),
330 u"suites": OrderedDict(),
331 u"tests": OrderedDict()
334 # Save the provided metadata
335 for key, val in metadata.items():
336 self._data[u"metadata"][key] = val
338 # Dictionary defining the methods used to parse different types of
341 u"timestamp": self._get_timestamp,
342 u"vpp-version": self._get_vpp_version,
343 u"dpdk-version": self._get_dpdk_version,
344 # TODO: Remove when not needed:
345 u"teardown-vat-history": self._get_vat_history,
346 u"teardown-papi-history": self._get_papi_history,
347 u"test-show-runtime": self._get_show_run,
348 u"testbed": self._get_testbed
353 """Getter - Data parsed from the XML file.
355 :returns: Data parsed from the XML file.
360 def _get_data_from_mrr_test_msg(self, msg):
361 """Get info from message of MRR performance tests.
363 :param msg: Message to be processed.
365 :returns: Processed message or original message if a problem occurs.
369 groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
370 if not groups or groups.lastindex != 1:
374 data = groups.group(1).split(u", ")
375 except (AttributeError, IndexError, ValueError, KeyError):
381 out_str += f"{(float(item) / 1e6):.2f}, "
382 return out_str[:-2] + u"]"
383 except (AttributeError, IndexError, ValueError, KeyError):
386 def _get_data_from_perf_test_msg(self, msg):
387 """Get info from message of NDRPDR performance tests.
389 :param msg: Message to be processed.
391 :returns: Processed message or original message if a problem occurs.
395 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
396 if not groups or groups.lastindex != 10:
401 u"ndr_low": float(groups.group(1)),
402 u"ndr_low_b": float(groups.group(2)),
403 u"pdr_low": float(groups.group(3)),
404 u"pdr_low_b": float(groups.group(4)),
405 u"pdr_lat_90_1": groups.group(5),
406 u"pdr_lat_90_2": groups.group(6),
407 u"pdr_lat_50_1": groups.group(7),
408 u"pdr_lat_50_2": groups.group(8),
409 u"pdr_lat_10_1": groups.group(9),
410 u"pdr_lat_10_2": groups.group(10),
412 except (AttributeError, IndexError, ValueError, KeyError):
415 def _process_lat(in_str_1, in_str_2):
416 """Extract min, avg, max values from latency string.
418 :param in_str_1: Latency string for one direction produced by robot
420 :param in_str_2: Latency string for second direction produced by
424 :returns: Processed latency string or None if a problem occurs.
427 in_list_1 = in_str_1.split('/', 3)
428 in_list_2 = in_str_2.split('/', 3)
430 if len(in_list_1) != 4 and len(in_list_2) != 4:
433 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
435 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
436 except hdrh.codec.HdrLengthException:
439 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
441 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
442 except hdrh.codec.HdrLengthException:
445 if hdr_lat_1 and hdr_lat_2:
447 hdr_lat_1.get_value_at_percentile(50.0),
448 hdr_lat_1.get_value_at_percentile(90.0),
449 hdr_lat_1.get_value_at_percentile(99.0),
450 hdr_lat_2.get_value_at_percentile(50.0),
451 hdr_lat_2.get_value_at_percentile(90.0),
452 hdr_lat_2.get_value_at_percentile(99.0)
462 f"1. {(data[u'ndr_low'] / 1e6):.2f} "
463 f"{data[u'ndr_low_b']:.2f}"
464 f"\n2. {(data[u'pdr_low'] / 1e6):.2f} "
465 f"{data[u'pdr_low_b']:.2f}"
468 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
469 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
470 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
473 max_len = len(str(max((max(item) for item in latency))))
475 for idx, lat in enumerate(latency):
478 out_msg += f"\n{idx + 3}. "
479 for count, itm in enumerate(lat):
482 out_msg += u" " * (max_len - len(str(itm)) + 1)
487 except (AttributeError, IndexError, ValueError, KeyError):
490 def _get_testbed(self, msg):
491 """Called when extraction of testbed IP is required.
492 The testbed is identified by TG node IP address.
494 :param msg: Message to process.
499 if msg.message.count(u"Setup of TG node") or \
500 msg.message.count(u"Setup of node TG host"):
501 reg_tg_ip = re.compile(
502 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
504 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
505 except (KeyError, ValueError, IndexError, AttributeError):
508 self._data[u"metadata"][u"testbed"] = self._testbed
509 self._msg_type = None
511 def _get_vpp_version(self, msg):
512 """Called when extraction of VPP version is required.
514 :param msg: Message to process.
519 if msg.message.count(u"return STDOUT Version:") or \
520 msg.message.count(u"VPP Version:") or \
521 msg.message.count(u"VPP version:"):
522 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
524 self._data[u"metadata"][u"version"] = self._version
525 self._msg_type = None
527 def _get_dpdk_version(self, msg):
528 """Called when extraction of DPDK version is required.
530 :param msg: Message to process.
535 if msg.message.count(u"DPDK Version:"):
537 self._version = str(re.search(
538 self.REGEX_VERSION_DPDK, msg.message).group(2))
539 self._data[u"metadata"][u"version"] = self._version
543 self._msg_type = None
545 def _get_timestamp(self, msg):
546 """Called when extraction of timestamp is required.
548 :param msg: Message to process.
553 self._timestamp = msg.timestamp[:14]
554 self._data[u"metadata"][u"generated"] = self._timestamp
555 self._msg_type = None
557 def _get_vat_history(self, msg):
558 """Called when extraction of VAT command history is required.
560 TODO: Remove when not needed.
562 :param msg: Message to process.
566 if msg.message.count(u"VAT command history:"):
567 self._conf_history_lookup_nr += 1
568 if self._conf_history_lookup_nr == 1:
569 self._data[u"tests"][self._test_id][u"conf-history"] = str()
571 self._msg_type = None
572 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
573 r"VAT command history:", u"",
574 msg.message, count=1).replace(u'\n', u' |br| ').\
577 self._data[u"tests"][self._test_id][u"conf-history"] += (
578 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
581 def _get_papi_history(self, msg):
582 """Called when extraction of PAPI command history is required.
584 :param msg: Message to process.
588 if msg.message.count(u"PAPI command history:"):
589 self._conf_history_lookup_nr += 1
590 if self._conf_history_lookup_nr == 1:
591 self._data[u"tests"][self._test_id][u"conf-history"] = str()
593 self._msg_type = None
594 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
595 r"PAPI command history:", u"",
596 msg.message, count=1).replace(u'\n', u' |br| ').\
598 self._data[u"tests"][self._test_id][u"conf-history"] += (
599 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
602 def _get_show_run(self, msg):
603 """Called when extraction of VPP operational data (output of CLI command
604 Show Runtime) is required.
606 :param msg: Message to process.
611 if not msg.message.count(u"stats runtime"):
615 if self._sh_run_counter > 1:
618 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
619 self._data[u"tests"][self._test_id][u"show-run"] = dict()
621 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
625 host = groups.group(1)
626 except (AttributeError, IndexError):
629 sock = groups.group(2)
630 except (AttributeError, IndexError):
633 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
634 replace(u"'", u'"').replace(u'b"', u'"').
635 replace(u'u"', u'"').split(u":", 1)[1])
638 threads_nr = len(runtime[0][u"clocks"])
639 except (IndexError, KeyError):
642 dut = u"DUT{nr}".format(
643 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
648 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
652 for idx in range(threads_nr):
653 if item[u"vectors"][idx] > 0:
654 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
655 elif item[u"calls"][idx] > 0:
656 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
657 elif item[u"suspends"][idx] > 0:
658 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
662 if item[u"calls"][idx] > 0:
663 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
667 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
668 int(item[u"suspends"][idx]):
669 oper[u"threads"][idx].append([
672 item[u"vectors"][idx],
673 item[u"suspends"][idx],
678 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
680 def _get_ndrpdr_throughput(self, msg):
681 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
684 :param msg: The test message to be parsed.
686 :returns: Parsed data as a dict and the status (PASS/FAIL).
687 :rtype: tuple(dict, str)
691 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
692 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
695 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
697 if groups is not None:
699 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
700 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
701 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
702 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
704 except (IndexError, ValueError):
707 return throughput, status
709 def _get_plr_throughput(self, msg):
710 """Get PLRsearch lower bound and PLRsearch upper bound from the test
713 :param msg: The test message to be parsed.
715 :returns: Parsed data as a dict and the status (PASS/FAIL).
716 :rtype: tuple(dict, str)
724 groups = re.search(self.REGEX_PLR_RATE, msg)
726 if groups is not None:
728 throughput[u"LOWER"] = float(groups.group(1))
729 throughput[u"UPPER"] = float(groups.group(2))
731 except (IndexError, ValueError):
734 return throughput, status
736 def _get_ndrpdr_latency(self, msg):
737 """Get LATENCY from the test message.
739 :param msg: The test message to be parsed.
741 :returns: Parsed data as a dict and the status (PASS/FAIL).
742 :rtype: tuple(dict, str)
752 u"direction1": copy.copy(latency_default),
753 u"direction2": copy.copy(latency_default)
756 u"direction1": copy.copy(latency_default),
757 u"direction2": copy.copy(latency_default)
760 u"direction1": copy.copy(latency_default),
761 u"direction2": copy.copy(latency_default)
764 u"direction1": copy.copy(latency_default),
765 u"direction2": copy.copy(latency_default)
768 u"direction1": copy.copy(latency_default),
769 u"direction2": copy.copy(latency_default)
772 u"direction1": copy.copy(latency_default),
773 u"direction2": copy.copy(latency_default)
777 # TODO: Rewrite when long and base are not needed
778 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
780 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
782 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
784 return latency, u"FAIL"
786 def process_latency(in_str):
787 """Return object with parsed latency values.
789 TODO: Define class for the return type.
791 :param in_str: Input string, min/avg/max/hdrh format.
793 :returns: Dict with corresponding keys, except hdrh float values.
795 :throws IndexError: If in_str does not have enough substrings.
796 :throws ValueError: If a substring does not convert to float.
798 in_list = in_str.split('/', 3)
801 u"min": float(in_list[0]),
802 u"avg": float(in_list[1]),
803 u"max": float(in_list[2]),
807 if len(in_list) == 4:
808 rval[u"hdrh"] = str(in_list[3])
813 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
814 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
815 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
816 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
817 if groups.lastindex == 4:
818 return latency, u"PASS"
819 except (IndexError, ValueError):
823 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
824 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
825 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
826 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
827 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
828 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
829 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
830 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
831 if groups.lastindex == 12:
832 return latency, u"PASS"
833 except (IndexError, ValueError):
836 # TODO: Remove when not needed
837 latency[u"NDR10"] = {
838 u"direction1": copy.copy(latency_default),
839 u"direction2": copy.copy(latency_default)
841 latency[u"NDR50"] = {
842 u"direction1": copy.copy(latency_default),
843 u"direction2": copy.copy(latency_default)
845 latency[u"NDR90"] = {
846 u"direction1": copy.copy(latency_default),
847 u"direction2": copy.copy(latency_default)
850 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
851 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
852 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
853 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
854 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
855 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
856 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
857 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
858 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
859 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
860 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
861 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
862 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
863 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
864 return latency, u"PASS"
865 except (IndexError, ValueError):
868 return latency, u"FAIL"
870 def visit_suite(self, suite):
871 """Implements traversing through the suite and its direct children.
873 :param suite: Suite to process.
877 if self.start_suite(suite) is not False:
878 suite.suites.visit(self)
879 suite.tests.visit(self)
880 self.end_suite(suite)
882 def start_suite(self, suite):
883 """Called when suite starts.
885 :param suite: Suite to process.
891 parent_name = suite.parent.name
892 except AttributeError:
895 doc_str = suite.doc.\
896 replace(u'"', u"'").\
897 replace(u'\n', u' ').\
898 replace(u'\r', u'').\
899 replace(u'*[', u' |br| *[').\
900 replace(u"*", u"**").\
901 replace(u' |br| *[', u'*[', 1)
903 self._data[u"suites"][suite.longname.lower().
905 replace(u" ", u"_")] = {
906 u"name": suite.name.lower(),
908 u"parent": parent_name,
909 u"level": len(suite.longname.split(u"."))
912 suite.keywords.visit(self)
914 def end_suite(self, suite):
915 """Called when suite ends.
917 :param suite: Suite to process.
922 def visit_test(self, test):
923 """Implements traversing through the test.
925 :param test: Test to process.
929 if self.start_test(test) is not False:
930 test.keywords.visit(self)
933 def start_test(self, test):
934 """Called when test starts.
936 :param test: Test to process.
941 self._sh_run_counter = 0
943 longname_orig = test.longname.lower()
945 # Check the ignore list
946 if longname_orig in self._ignore:
949 tags = [str(tag) for tag in test.tags]
952 # Change the TC long name and name if defined in the mapping table
953 longname = self._mapping.get(longname_orig, None)
954 if longname is not None:
955 name = longname.split(u'.')[-1]
957 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
961 longname = longname_orig
962 name = test.name.lower()
964 # Remove TC number from the TC long name (backward compatibility):
965 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
966 # Remove TC number from the TC name (not needed):
967 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
969 test_result[u"parent"] = test.parent.name.lower()
970 test_result[u"tags"] = tags
971 test_result["doc"] = test.doc.\
972 replace(u'"', u"'").\
973 replace(u'\n', u' ').\
974 replace(u'\r', u'').\
975 replace(u'[', u' |br| [').\
976 replace(u' |br| [', u'[', 1)
977 test_result[u"msg"] = test.message.\
978 replace(u'\n', u' |br| ').\
979 replace(u'\r', u'').\
981 test_result[u"type"] = u"FUNC"
982 test_result[u"status"] = test.status
984 if u"PERFTEST" in tags:
985 # Replace info about cores (e.g. -1c-) with the info about threads
986 # and cores (e.g. -1t1c-) in the long test case names and in the
987 # test case names if necessary.
988 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
992 for tag in test_result[u"tags"]:
993 groups = re.search(self.REGEX_TC_TAG, tag)
999 self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
1000 f"-{tag_tc.lower()}-",
1003 test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
1004 f"-{tag_tc.lower()}-",
1005 test_result["name"],
1008 test_result[u"status"] = u"FAIL"
1009 self._data[u"tests"][self._test_id] = test_result
1011 f"The test {self._test_id} has no or more than one "
1012 f"multi-threading tags.\n"
1013 f"Tags: {test_result[u'tags']}"
1017 if test.status == u"PASS":
1018 if u"NDRPDR" in tags:
1019 test_result[u"msg"] = self._get_data_from_perf_test_msg(
1021 replace(u'\n', u' |br| '). \
1022 replace(u'\r', u''). \
1024 test_result[u"type"] = u"NDRPDR"
1025 test_result[u"throughput"], test_result[u"status"] = \
1026 self._get_ndrpdr_throughput(test.message)
1027 test_result[u"latency"], test_result[u"status"] = \
1028 self._get_ndrpdr_latency(test.message)
1029 elif u"SOAK" in tags:
1030 test_result[u"type"] = u"SOAK"
1031 test_result[u"throughput"], test_result[u"status"] = \
1032 self._get_plr_throughput(test.message)
1033 elif u"TCP" in tags:
1034 test_result[u"type"] = u"TCP"
1035 groups = re.search(self.REGEX_TCP, test.message)
1036 test_result[u"result"] = int(groups.group(2))
1037 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1038 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1040 replace(u'\n', u' |br| '). \
1041 replace(u'\r', u''). \
1044 test_result[u"type"] = u"MRR"
1046 test_result[u"type"] = u"BMRR"
1048 test_result[u"result"] = dict()
1049 groups = re.search(self.REGEX_BMRR, test.message)
1050 if groups is not None:
1051 items_str = groups.group(1)
1052 items_float = [float(item.strip()) for item
1053 in items_str.split(",")]
1054 # Use whole list in CSIT-1180.
1055 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1056 test_result[u"result"][u"receive-rate"] = stats.avg
1058 groups = re.search(self.REGEX_MRR, test.message)
1059 test_result[u"result"][u"receive-rate"] = \
1060 float(groups.group(3)) / float(groups.group(1))
1061 elif u"RECONF" in tags:
1062 test_result[u"type"] = u"RECONF"
1063 test_result[u"result"] = None
1065 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1066 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1067 test_result[u"result"] = {
1068 u"loss": int(grps_loss.group(1)),
1069 u"time": float(grps_time.group(1))
1071 except (AttributeError, IndexError, ValueError, TypeError):
1072 test_result[u"status"] = u"FAIL"
1073 elif u"DEVICETEST" in tags:
1074 test_result[u"type"] = u"DEVICETEST"
1076 test_result[u"status"] = u"FAIL"
1077 self._data[u"tests"][self._test_id] = test_result
1080 self._data[u"tests"][self._test_id] = test_result
1082 def end_test(self, test):
1083 """Called when test ends.
1085 :param test: Test to process.
1090 def visit_keyword(self, keyword):
1091 """Implements traversing through the keyword and its child keywords.
1093 :param keyword: Keyword to process.
1094 :type keyword: Keyword
1097 if self.start_keyword(keyword) is not False:
1098 self.end_keyword(keyword)
1100 def start_keyword(self, keyword):
1101 """Called when keyword starts. Default implementation does nothing.
1103 :param keyword: Keyword to process.
1104 :type keyword: Keyword
1108 if keyword.type == u"setup":
1109 self.visit_setup_kw(keyword)
1110 elif keyword.type == u"teardown":
1111 self.visit_teardown_kw(keyword)
1113 self.visit_test_kw(keyword)
1114 except AttributeError:
1117 def end_keyword(self, keyword):
1118 """Called when keyword ends. Default implementation does nothing.
1120 :param keyword: Keyword to process.
1121 :type keyword: Keyword
1125 def visit_test_kw(self, test_kw):
1126 """Implements traversing through the test keyword and its child
1129 :param test_kw: Keyword to process.
1130 :type test_kw: Keyword
1133 for keyword in test_kw.keywords:
1134 if self.start_test_kw(keyword) is not False:
1135 self.visit_test_kw(keyword)
1136 self.end_test_kw(keyword)
1138 def start_test_kw(self, test_kw):
1139 """Called when test keyword starts. Default implementation does
1142 :param test_kw: Keyword to process.
1143 :type test_kw: Keyword
1146 if test_kw.name.count(u"Show Runtime On All Duts") or \
1147 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1148 self._msg_type = u"test-show-runtime"
1149 self._sh_run_counter += 1
1150 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1151 self._msg_type = u"dpdk-version"
1154 test_kw.messages.visit(self)
1156 def end_test_kw(self, test_kw):
1157 """Called when keyword ends. Default implementation does nothing.
1159 :param test_kw: Keyword to process.
1160 :type test_kw: Keyword
1164 def visit_setup_kw(self, setup_kw):
1165 """Implements traversing through the teardown keyword and its child
1168 :param setup_kw: Keyword to process.
1169 :type setup_kw: Keyword
1172 for keyword in setup_kw.keywords:
1173 if self.start_setup_kw(keyword) is not False:
1174 self.visit_setup_kw(keyword)
1175 self.end_setup_kw(keyword)
1177 def start_setup_kw(self, setup_kw):
1178 """Called when teardown keyword starts. Default implementation does
1181 :param setup_kw: Keyword to process.
1182 :type setup_kw: Keyword
1185 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1186 and not self._version:
1187 self._msg_type = u"vpp-version"
1188 elif setup_kw.name.count(u"Set Global Variable") \
1189 and not self._timestamp:
1190 self._msg_type = u"timestamp"
1191 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1192 self._msg_type = u"testbed"
1195 setup_kw.messages.visit(self)
1197 def end_setup_kw(self, setup_kw):
1198 """Called when keyword ends. Default implementation does nothing.
1200 :param setup_kw: Keyword to process.
1201 :type setup_kw: Keyword
1205 def visit_teardown_kw(self, teardown_kw):
1206 """Implements traversing through the teardown keyword and its child
1209 :param teardown_kw: Keyword to process.
1210 :type teardown_kw: Keyword
1213 for keyword in teardown_kw.keywords:
1214 if self.start_teardown_kw(keyword) is not False:
1215 self.visit_teardown_kw(keyword)
1216 self.end_teardown_kw(keyword)
1218 def start_teardown_kw(self, teardown_kw):
1219 """Called when teardown keyword starts
1221 :param teardown_kw: Keyword to process.
1222 :type teardown_kw: Keyword
1226 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1227 # TODO: Remove when not needed:
1228 self._conf_history_lookup_nr = 0
1229 self._msg_type = u"teardown-vat-history"
1230 teardown_kw.messages.visit(self)
1231 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1232 self._conf_history_lookup_nr = 0
1233 self._msg_type = u"teardown-papi-history"
1234 teardown_kw.messages.visit(self)
1236 def end_teardown_kw(self, teardown_kw):
1237 """Called when keyword ends. Default implementation does nothing.
1239 :param teardown_kw: Keyword to process.
1240 :type teardown_kw: Keyword
1244 def visit_message(self, msg):
1245 """Implements visiting the message.
1247 :param msg: Message to process.
1251 if self.start_message(msg) is not False:
1252 self.end_message(msg)
1254 def start_message(self, msg):
1255 """Called when message starts. Get required information from messages:
1258 :param msg: Message to process.
1264 self.parse_msg[self._msg_type](msg)
1266 def end_message(self, msg):
1267 """Called when message ends. Default implementation does nothing.
1269 :param msg: Message to process.
1278 The data is extracted from output.xml files generated by Jenkins jobs and
1279 stored in pandas' DataFrames.
1285 (as described in ExecutionChecker documentation)
1287 (as described in ExecutionChecker documentation)
1289 (as described in ExecutionChecker documentation)
# Initializer of the input-data container class (class header elided in this
# excerpt): stores the specification and an empty pandas Series for results.
1292     def __init__(self, spec):
1295         :param spec: Specification.
1296         :type spec: Specification
# Accumulator for parsed per-job/per-build data.
1303         self._input_data = pd.Series()
# NOTE(review): the `@property` / `def data(self):` lines of the getter below
# are elided in this excerpt; only its docstring fragment and return remain.
1307         """Getter - Input data.
1309         :returns: Input data
1310         :rtype: pandas.Series
1312         return self._input_data
1314     def metadata(self, job, build):
1315         """Getter - metadata
1317         :param job: Job which metadata we want.
1318         :param build: Build which metadata we want.
1322         :rtype: pandas.Series
# Indexes the parsed data by job, then build, returning the "metadata" part.
1325         return self.data[job][build][u"metadata"]
1327     def suites(self, job, build):
1330         :param job: Job which suites we want.
1331         :param build: Build which suites we want.
1335         :rtype: pandas.Series
# NOTE(review): build is stringified here but used as-is in metadata()/tests()
# -- inconsistent; confirm what key type callers pass before unifying.
1338         return self.data[job][str(build)][u"suites"]
1340     def tests(self, job, build):
1343         :param job: Job which tests we want.
1344         :param build: Build which tests we want.
1348         :rtype: pandas.Series
# Indexes the parsed data by job, then build, returning the "tests" part.
1351         return self.data[job][build][u"tests"]
1353     def _parse_tests(self, job, build, log):
1354         """Process data from robot output.xml file and return JSON structured
1357         :param job: The name of job which build output data will be processed.
1358         :param build: The build which output data will be processed.
1359         :param log: List of log messages.
1362         :type log: list of tuples (severity, msg)
1363         :returns: JSON data structure.
# Parse the downloaded output.xml with Robot Framework's ExecutionResult;
# a DataError is reported into the log list rather than raised.
1372         with open(build[u"file-name"], u'r') as data_file:
1374                 result = ExecutionResult(data_file)
1375             except errors.DataError as err:
1377                 (u"ERROR", f"Error occurred while parsing output.xml: "
# Walk the result tree with the ExecutionChecker visitor defined above.
1381         checker = ExecutionChecker(metadata, self._cfg.mapping,
1383         result.visit(checker)
1387 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1388 """Download and parse the input data file.
1390 :param pid: PID of the process executing this method.
1391 :param job: Name of the Jenkins job which generated the processed input
1393 :param build: Information about the Jenkins build which generated the
1394 processed input file.
1395 :param repeat: Repeat the download specified number of times if not
1406 (u"INFO", f" Processing the job/build: {job}: {build[u'build']}")
1414 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1422 f"It is not possible to download the input data file from the "
1423 f"job {job}, build {build[u'build']}, or it is damaged. "
1429 f" Processing data from the build {build[u'build']} ...")
1431 data = self._parse_tests(job, build, logs)
1435 f"Input data file from the job {job}, build "
1436 f"{build[u'build']} is damaged. Skipped.")
1439 state = u"processed"
1442 remove(build[u"file-name"])
1443 except OSError as err:
1445 ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1449 # If the time-period is defined in the specification file, remove all
1450 # files which are outside the time period.
1451 timeperiod = self._cfg.input.get(u"time-period", None)
1452 if timeperiod and data:
1454 timeperiod = timedelta(int(timeperiod))
1455 metadata = data.get(u"metadata", None)
1457 generated = metadata.get(u"generated", None)
1459 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1460 if (now - generated) > timeperiod:
1461 # Remove the data and the file:
1466 f" The build {job}/{build[u'build']} is "
1467 f"outdated, will be removed.")
1469 logs.append((u"INFO", u" Done."))
1471 for level, line in logs:
1472 if level == u"INFO":
1474 elif level == u"ERROR":
1476 elif level == u"DEBUG":
1478 elif level == u"CRITICAL":
1479 logging.critical(line)
1480 elif level == u"WARNING":
1481 logging.warning(line)
1483 return {u"data": data, u"state": state, u"job": job, u"build": build}
1485 def download_and_parse_data(self, repeat=1):
1486 """Download the input data files, parse input data from input files and
1487 store in pandas' Series.
1489 :param repeat: Repeat the download specified number of times if not
1494 logging.info(u"Downloading and parsing input files ...")
1496 for job, builds in self._cfg.builds.items():
1497 for build in builds:
1499 result = self._download_and_parse_build(job, build, repeat)
1500 build_nr = result[u"build"][u"build"]
1503 data = result[u"data"]
1504 build_data = pd.Series({
1505 u"metadata": pd.Series(
1506 list(data[u"metadata"].values()),
1507 index=list(data[u"metadata"].keys())
1509 u"suites": pd.Series(
1510 list(data[u"suites"].values()),
1511 index=list(data[u"suites"].keys())
1513 u"tests": pd.Series(
1514 list(data[u"tests"].values()),
1515 index=list(data[u"tests"].keys())
1519 if self._input_data.get(job, None) is None:
1520 self._input_data[job] = pd.Series()
1521 self._input_data[job][str(build_nr)] = build_data
1523 self._cfg.set_input_file_name(
1524 job, build_nr, result[u"build"][u"file-name"])
1526 self._cfg.set_input_state(job, build_nr, result[u"state"])
1529 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1530 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1532 logging.info(u"Done.")
1535 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1536 """Return the index of character in the string which is the end of tag.
1538 :param tag_filter: The string where the end of tag is being searched.
1539 :param start: The index where the searching is stated.
1540 :param closer: The character which is the tag closer.
1541 :type tag_filter: str
1544 :returns: The index of the tag closer.
1549 idx_opener = tag_filter.index(closer, start)
1550 return tag_filter.index(closer, idx_opener + 1)
1555 def _condition(tag_filter):
1556 """Create a conditional statement from the given tag filter.
1558 :param tag_filter: Filter based on tags from the element specification.
1559 :type tag_filter: str
1560 :returns: Conditional statement which can be evaluated.
1566 index = InputData._end_of_tag(tag_filter, index)
1570 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1572 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1573 continue_on_error=False):
1574 """Filter required data from the given jobs and builds.
1576 The output data structure is:
1580 - test (or suite) 1 ID:
1586 - test (or suite) n ID:
1593 :param element: Element which will use the filtered data.
1594 :param params: Parameters which will be included in the output. If None,
1595 all parameters are included.
1596 :param data: If not None, this data is used instead of data specified
1598 :param data_set: The set of data to be filtered: tests, suites,
1600 :param continue_on_error: Continue if there is error while reading the
1601 data. The Item will be empty then
1602 :type element: pandas.Series
1606 :type continue_on_error: bool
1607 :returns: Filtered data.
1608 :rtype pandas.Series
1612 if data_set == "suites":
1614 elif element[u"filter"] in (u"all", u"template"):
1617 cond = InputData._condition(element[u"filter"])
1618 logging.debug(f" Filter: {cond}")
1620 logging.error(u" No filter defined.")
1624 params = element.get(u"parameters", None)
1626 params.append(u"type")
1628 data_to_filter = data if data else element[u"data"]
1631 for job, builds in data_to_filter.items():
1632 data[job] = pd.Series()
1633 for build in builds:
1634 data[job][str(build)] = pd.Series()
1637 self.data[job][str(build)][data_set].items())
1639 if continue_on_error:
1643 for test_id, test_data in data_dict.items():
1644 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1645 data[job][str(build)][test_id] = pd.Series()
1647 for param, val in test_data.items():
1648 data[job][str(build)][test_id][param] = val
1650 for param in params:
1652 data[job][str(build)][test_id][param] =\
1655 data[job][str(build)][test_id][param] =\
1659 except (KeyError, IndexError, ValueError) as err:
1661 f"Missing mandatory parameter in the element specification: "
1665 except AttributeError as err:
1666 logging.error(repr(err))
1668 except SyntaxError as err:
1670 f"The filter {cond} is not correct. Check if all tags are "
1671 f"enclosed by apostrophes.\n{repr(err)}"
1675 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1676 continue_on_error=False):
1677 """Filter required data from the given jobs and builds.
1679 The output data structure is:
1683 - test (or suite) 1 ID:
1689 - test (or suite) n ID:
1696 :param element: Element which will use the filtered data.
1697 :param params: Parameters which will be included in the output. If None,
1698 all parameters are included.
1699 :param data_set: The set of data to be filtered: tests, suites,
1701 :param continue_on_error: Continue if there is error while reading the
1702 data. The Item will be empty then
1703 :type element: pandas.Series
1706 :type continue_on_error: bool
1707 :returns: Filtered data.
1708 :rtype pandas.Series
1711 include = element.get(u"include", None)
1713 logging.warning(u"No tests to include, skipping the element.")
1717 params = element.get(u"parameters", None)
1719 params.append(u"type")
1723 for job, builds in element[u"data"].items():
1724 data[job] = pd.Series()
1725 for build in builds:
1726 data[job][str(build)] = pd.Series()
1727 for test in include:
1729 reg_ex = re.compile(str(test).lower())
1730 for test_id in self.data[job][
1731 str(build)][data_set].keys():
1732 if re.match(reg_ex, str(test_id).lower()):
1733 test_data = self.data[job][
1734 str(build)][data_set][test_id]
1735 data[job][str(build)][test_id] = pd.Series()
1737 for param, val in test_data.items():
1738 data[job][str(build)][test_id]\
1741 for param in params:
1743 data[job][str(build)][
1747 data[job][str(build)][
1748 test_id][param] = u"No Data"
1749 except KeyError as err:
1750 logging.error(repr(err))
1751 if continue_on_error:
1756 except (KeyError, IndexError, ValueError) as err:
1758 f"Missing mandatory parameter in the element "
1759 f"specification: {repr(err)}"
1762 except AttributeError as err:
1763 logging.error(repr(err))
1767 def merge_data(data):
1768 """Merge data from more jobs and builds to a simple data structure.
1770 The output data structure is:
1772 - test (suite) 1 ID:
1778 - test (suite) n ID:
1781 :param data: Data to merge.
1782 :type data: pandas.Series
1783 :returns: Merged data.
1784 :rtype: pandas.Series
1787 logging.info(u" Merging data ...")
1789 merged_data = pd.Series()
1790 for builds in data.values:
1791 for item in builds.values:
1792 for item_id, item_data in item.items():
1793 merged_data[item_id] = item_data
1797 def print_all_oper_data(self):
1798 """Print all operational data to console.
1806 u"Cycles per Packet",
1807 u"Average Vector Size"
1810 for job in self._input_data.values:
1811 for build in job.values:
1812 for test_id, test_data in build[u"tests"].items():
1814 if test_data.get(u"show-run", None) is None:
1816 for dut_name, data in test_data[u"show-run"].items():
1817 if data.get(u"threads", None) is None:
1819 print(f"Host IP: {data.get(u'host', '')}, "
1820 f"Socket: {data.get(u'socket', '')}")
1821 for thread_nr, thread in data[u"threads"].items():
1822 txt_table = prettytable.PrettyTable(tbl_hdr)
1825 txt_table.add_row(row)
1827 if len(thread) == 0:
1830 avg = f", Average Vector Size per Node: " \
1831 f"{(avg / len(thread)):.2f}"
1832 th_name = u"main" if thread_nr == 0 \
1833 else f"worker_{thread_nr}"
1834 print(f"{dut_name}, {th_name}{avg}")
1835 txt_table.float_format = u".2"
1836 txt_table.align = u"r"
1837 txt_table.align[u"Name"] = u"l"
1838 print(f"{txt_table.get_string()}\n")