1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
38 from robot.api import ExecutionResult, ResultVisitor
39 from robot import errors
41 from resources.libraries.python import jumpavg
42 from input_data_files import download_and_unzip_data_file
45 # Separator used in file names
49 class ExecutionChecker(ResultVisitor):
50 """Class to traverse through the test suite structure.
52 The functionality implemented in this class generates a json structure:
58 "generated": "Timestamp",
59 "version": "SUT version",
60 "job": "Jenkins job name",
61 "build": "Information about the build"
64 "Suite long name 1": {
66 "doc": "Suite 1 documentation",
67 "parent": "Suite 1 parent",
68 "level": "Level of the suite in the suite hierarchy"
70 "Suite long name N": {
72 "doc": "Suite N documentation",
73 "parent": "Suite 2 parent",
74 "level": "Level of the suite in the suite hierarchy"
81 "parent": "Name of the parent of the test",
82 "doc": "Test documentation",
83 "msg": "Test message",
84 "conf-history": "DUT1 and DUT2 VAT History",
85 "show-run": "Show Run",
86 "tags": ["tag 1", "tag 2", "tag n"],
88 "status": "PASS" | "FAIL",
134 "parent": "Name of the parent of the test",
135 "doc": "Test documentation",
136 "msg": "Test message",
137 "tags": ["tag 1", "tag 2", "tag n"],
139 "status": "PASS" | "FAIL",
146 "parent": "Name of the parent of the test",
147 "doc": "Test documentation",
148 "msg": "Test message",
149 "tags": ["tag 1", "tag 2", "tag n"],
150 "type": "MRR" | "BMRR",
151 "status": "PASS" | "FAIL",
153 "receive-rate": float,
154 # Average of a list, computed using AvgStdevStats.
155 # In CSIT-1180, replace with List[float].
169 "metadata": { # Optional
170 "version": "VPP version",
171 "job": "Jenkins job name",
172 "build": "Information about the build"
176 "doc": "Suite 1 documentation",
177 "parent": "Suite 1 parent",
178 "level": "Level of the suite in the suite hierarchy"
181 "doc": "Suite N documentation",
182 "parent": "Suite 2 parent",
183 "level": "Level of the suite in the suite hierarchy"
189 "parent": "Name of the parent of the test",
190 "doc": "Test documentation"
191 "msg": "Test message"
192 "tags": ["tag 1", "tag 2", "tag n"],
193 "conf-history": "DUT1 and DUT2 VAT History"
194 "show-run": "Show Run"
195 "status": "PASS" | "FAIL"
203 .. note:: ID is the lowercase full path to the test.
206 REGEX_PLR_RATE = re.compile(
207 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
208 r'PLRsearch upper bound::?\s(\d+.\d+)'
210 REGEX_NDRPDR_RATE = re.compile(
211 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
212 r'NDR_UPPER:\s(\d+.\d+).*\n'
213 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
214 r'PDR_UPPER:\s(\d+.\d+)'
216 REGEX_PERF_MSG_INFO = re.compile(
217 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
218 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
219 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
220 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
221 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
223 REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
225 # TODO: Remove when not needed
226 REGEX_NDRPDR_LAT_BASE = re.compile(
227 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
228 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
230 REGEX_NDRPDR_LAT = re.compile(
231 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
232 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
233 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
234 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
235 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
236 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
238 # TODO: Remove when not needed
239 REGEX_NDRPDR_LAT_LONG = re.compile(
240 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
241 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
242 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
246 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
247 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
248 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
250 REGEX_VERSION_VPP = re.compile(
251 r"(return STDOUT Version:\s*|"
252 r"VPP Version:\s*|VPP version:\s*)(.*)"
254 REGEX_VERSION_DPDK = re.compile(
255 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
257 REGEX_TCP = re.compile(
258 r'Total\s(rps|cps|throughput):\s(\d*).*$'
260 REGEX_MRR = re.compile(
261 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
262 r'tx\s(\d*),\srx\s(\d*)'
264 REGEX_BMRR = re.compile(
265 r'Maximum Receive Rate trial results'
266 r' in packets per second: \[(.*)\]'
268 REGEX_RECONF_LOSS = re.compile(
269 r'Packets lost due to reconfig: (\d*)'
271 REGEX_RECONF_TIME = re.compile(
272 r'Implied time lost: (\d*.[\de-]*)'
274 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
276 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
278 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
280 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
282 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
284 def __init__(self, metadata, mapping, ignore):
287 :param metadata: Key-value pairs to be included in "metadata" part of
289 :param mapping: Mapping of the old names of test cases to the new
291 :param ignore: List of TCs to be ignored.
297 # Type of message to parse out from the test messages
298 self._msg_type = None
304 self._timestamp = None
306 # Testbed. The testbed is identified by TG node IP address.
309 # Mapping of TCs long names
310 self._mapping = mapping
313 self._ignore = ignore
315 # Number of PAPI History messages found:
317 # 1 - PAPI History of DUT1
318 # 2 - PAPI History of DUT2
319 self._conf_history_lookup_nr = 0
321 self._sh_run_counter = 0
323 # Test ID of currently processed test- the lowercase full path to the
327 # The main data structure
329 u"metadata": OrderedDict(),
330 u"suites": OrderedDict(),
331 u"tests": OrderedDict()
334 # Save the provided metadata
335 for key, val in metadata.items():
336 self._data[u"metadata"][key] = val
338 # Dictionary defining the methods used to parse different types of
341 u"timestamp": self._get_timestamp,
342 u"vpp-version": self._get_vpp_version,
343 u"dpdk-version": self._get_dpdk_version,
344 # TODO: Remove when not needed:
345 u"teardown-vat-history": self._get_vat_history,
346 u"teardown-papi-history": self._get_papi_history,
347 u"test-show-runtime": self._get_show_run,
348 u"testbed": self._get_testbed
353 """Getter - Data parsed from the XML file.
355 :returns: Data parsed from the XML file.
360 def _get_data_from_mrr_test_msg(self, msg):
361 """Get info from message of MRR performance tests.
363 :param msg: Message to be processed.
365 :returns: Processed message or original message if a problem occurs.
369 groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
370 if not groups or groups.lastindex != 1:
371 return u"Test Failed."
374 data = groups.group(1).split(u", ")
375 except (AttributeError, IndexError, ValueError, KeyError):
376 return u"Test Failed."
381 out_str += f"{(float(item) / 1e6):.2f}, "
382 return out_str[:-2] + u"]"
383 except (AttributeError, IndexError, ValueError, KeyError):
384 return u"Test Failed."
386 def _get_data_from_perf_test_msg(self, msg):
387 """Get info from message of NDRPDR performance tests.
389 :param msg: Message to be processed.
391 :returns: Processed message or original message if a problem occurs.
395 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
396 if not groups or groups.lastindex != 10:
397 return u"Test Failed."
401 u"ndr_low": float(groups.group(1)),
402 u"ndr_low_b": float(groups.group(2)),
403 u"pdr_low": float(groups.group(3)),
404 u"pdr_low_b": float(groups.group(4)),
405 u"pdr_lat_90_1": groups.group(5),
406 u"pdr_lat_90_2": groups.group(6),
407 u"pdr_lat_50_1": groups.group(7),
408 u"pdr_lat_50_2": groups.group(8),
409 u"pdr_lat_10_1": groups.group(9),
410 u"pdr_lat_10_2": groups.group(10),
412 except (AttributeError, IndexError, ValueError, KeyError):
413 return u"Test Failed."
415 def _process_lat(in_str_1, in_str_2):
416 """Extract min, avg, max values from latency string.
418 :param in_str_1: Latency string for one direction produced by robot
420 :param in_str_2: Latency string for second direction produced by
424 :returns: Processed latency string or None if a problem occurs.
427 in_list_1 = in_str_1.split('/', 3)
428 in_list_2 = in_str_2.split('/', 3)
430 if len(in_list_1) != 4 and len(in_list_2) != 4:
433 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
435 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
436 except hdrh.codec.HdrLengthException:
439 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
441 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
442 except hdrh.codec.HdrLengthException:
445 if hdr_lat_1 and hdr_lat_2:
447 hdr_lat_1.get_value_at_percentile(50.0),
448 hdr_lat_1.get_value_at_percentile(90.0),
449 hdr_lat_1.get_value_at_percentile(99.0),
450 hdr_lat_2.get_value_at_percentile(50.0),
451 hdr_lat_2.get_value_at_percentile(90.0),
452 hdr_lat_2.get_value_at_percentile(99.0)
462 f"1. {(data[u'ndr_low'] / 1e6):.2f} "
463 f"{data[u'ndr_low_b']:.2f}"
464 f"\n2. {(data[u'pdr_low'] / 1e6):.2f} "
465 f"{data[u'pdr_low_b']:.2f}"
468 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
469 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
470 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
473 max_len = len(str(max((max(item) for item in latency))))
475 for idx, lat in enumerate(latency):
478 out_msg += f"\n{idx + 3}. "
479 for count, itm in enumerate(lat):
482 out_msg += u" " * (max_len - len(str(itm)) + 1)
487 except (AttributeError, IndexError, ValueError, KeyError):
488 return u"Test Failed."
490 def _get_testbed(self, msg):
491 """Called when extraction of testbed IP is required.
492 The testbed is identified by TG node IP address.
494 :param msg: Message to process.
499 if msg.message.count(u"Setup of TG node") or \
500 msg.message.count(u"Setup of node TG host"):
501 reg_tg_ip = re.compile(
502 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
504 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
505 except (KeyError, ValueError, IndexError, AttributeError):
508 self._data[u"metadata"][u"testbed"] = self._testbed
509 self._msg_type = None
511 def _get_vpp_version(self, msg):
512 """Called when extraction of VPP version is required.
514 :param msg: Message to process.
519 if msg.message.count(u"return STDOUT Version:") or \
520 msg.message.count(u"VPP Version:") or \
521 msg.message.count(u"VPP version:"):
522 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
524 self._data[u"metadata"][u"version"] = self._version
525 self._msg_type = None
527 def _get_dpdk_version(self, msg):
528 """Called when extraction of DPDK version is required.
530 :param msg: Message to process.
535 if msg.message.count(u"DPDK Version:"):
537 self._version = str(re.search(
538 self.REGEX_VERSION_DPDK, msg.message).group(2))
539 self._data[u"metadata"][u"version"] = self._version
543 self._msg_type = None
545 def _get_timestamp(self, msg):
546 """Called when extraction of timestamp is required.
548 :param msg: Message to process.
553 self._timestamp = msg.timestamp[:14]
554 self._data[u"metadata"][u"generated"] = self._timestamp
555 self._msg_type = None
557 def _get_vat_history(self, msg):
558 """Called when extraction of VAT command history is required.
560 TODO: Remove when not needed.
562 :param msg: Message to process.
566 if msg.message.count(u"VAT command history:"):
567 self._conf_history_lookup_nr += 1
568 if self._conf_history_lookup_nr == 1:
569 self._data[u"tests"][self._test_id][u"conf-history"] = str()
571 self._msg_type = None
572 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
573 r"VAT command history:", u"",
574 msg.message, count=1).replace(u'\n', u' |br| ').\
577 self._data[u"tests"][self._test_id][u"conf-history"] += (
578 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
581 def _get_papi_history(self, msg):
582 """Called when extraction of PAPI command history is required.
584 :param msg: Message to process.
588 if msg.message.count(u"PAPI command history:"):
589 self._conf_history_lookup_nr += 1
590 if self._conf_history_lookup_nr == 1:
591 self._data[u"tests"][self._test_id][u"conf-history"] = str()
593 self._msg_type = None
594 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
595 r"PAPI command history:", u"",
596 msg.message, count=1).replace(u'\n', u' |br| ').\
598 self._data[u"tests"][self._test_id][u"conf-history"] += (
599 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
602 def _get_show_run(self, msg):
603 """Called when extraction of VPP operational data (output of CLI command
604 Show Runtime) is required.
606 :param msg: Message to process.
611 if not msg.message.count(u"stats runtime"):
615 if self._sh_run_counter > 1:
618 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
619 self._data[u"tests"][self._test_id][u"show-run"] = dict()
621 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
625 host = groups.group(1)
626 except (AttributeError, IndexError):
629 sock = groups.group(2)
630 except (AttributeError, IndexError):
633 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
634 replace(u"'", u'"').replace(u'b"', u'"').
635 replace(u'u"', u'"').split(u":", 1)[1])
638 threads_nr = len(runtime[0][u"clocks"])
639 except (IndexError, KeyError):
642 dut = u"DUT{nr}".format(
643 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
648 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
652 for idx in range(threads_nr):
653 if item[u"vectors"][idx] > 0:
654 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
655 elif item[u"calls"][idx] > 0:
656 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
657 elif item[u"suspends"][idx] > 0:
658 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
662 if item[u"calls"][idx] > 0:
663 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
667 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
668 int(item[u"suspends"][idx]):
669 oper[u"threads"][idx].append([
672 item[u"vectors"][idx],
673 item[u"suspends"][idx],
678 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
680 def _get_ndrpdr_throughput(self, msg):
681 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
684 :param msg: The test message to be parsed.
686 :returns: Parsed data as a dict and the status (PASS/FAIL).
687 :rtype: tuple(dict, str)
691 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
692 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
695 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
697 if groups is not None:
699 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
700 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
701 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
702 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
704 except (IndexError, ValueError):
707 return throughput, status
709 def _get_plr_throughput(self, msg):
710 """Get PLRsearch lower bound and PLRsearch upper bound from the test
713 :param msg: The test message to be parsed.
715 :returns: Parsed data as a dict and the status (PASS/FAIL).
716 :rtype: tuple(dict, str)
724 groups = re.search(self.REGEX_PLR_RATE, msg)
726 if groups is not None:
728 throughput[u"LOWER"] = float(groups.group(1))
729 throughput[u"UPPER"] = float(groups.group(2))
731 except (IndexError, ValueError):
734 return throughput, status
736 def _get_ndrpdr_latency(self, msg):
737 """Get LATENCY from the test message.
739 :param msg: The test message to be parsed.
741 :returns: Parsed data as a dict and the status (PASS/FAIL).
742 :rtype: tuple(dict, str)
752 u"direction1": copy.copy(latency_default),
753 u"direction2": copy.copy(latency_default)
756 u"direction1": copy.copy(latency_default),
757 u"direction2": copy.copy(latency_default)
760 u"direction1": copy.copy(latency_default),
761 u"direction2": copy.copy(latency_default)
764 u"direction1": copy.copy(latency_default),
765 u"direction2": copy.copy(latency_default)
768 u"direction1": copy.copy(latency_default),
769 u"direction2": copy.copy(latency_default)
772 u"direction1": copy.copy(latency_default),
773 u"direction2": copy.copy(latency_default)
777 # TODO: Rewrite when long and base are not needed
778 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
780 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
782 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
784 return latency, u"FAIL"
786 def process_latency(in_str):
787 """Return object with parsed latency values.
789 TODO: Define class for the return type.
791 :param in_str: Input string, min/avg/max/hdrh format.
793 :returns: Dict with corresponding keys, except hdrh float values.
795 :throws IndexError: If in_str does not have enough substrings.
796 :throws ValueError: If a substring does not convert to float.
798 in_list = in_str.split('/', 3)
801 u"min": float(in_list[0]),
802 u"avg": float(in_list[1]),
803 u"max": float(in_list[2]),
807 if len(in_list) == 4:
808 rval[u"hdrh"] = str(in_list[3])
813 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
814 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
815 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
816 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
817 if groups.lastindex == 4:
818 return latency, u"PASS"
819 except (IndexError, ValueError):
823 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
824 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
825 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
826 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
827 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
828 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
829 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
830 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
831 if groups.lastindex == 12:
832 return latency, u"PASS"
833 except (IndexError, ValueError):
836 # TODO: Remove when not needed
837 latency[u"NDR10"] = {
838 u"direction1": copy.copy(latency_default),
839 u"direction2": copy.copy(latency_default)
841 latency[u"NDR50"] = {
842 u"direction1": copy.copy(latency_default),
843 u"direction2": copy.copy(latency_default)
845 latency[u"NDR90"] = {
846 u"direction1": copy.copy(latency_default),
847 u"direction2": copy.copy(latency_default)
850 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
851 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
852 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
853 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
854 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
855 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
856 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
857 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
858 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
859 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
860 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
861 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
862 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
863 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
864 return latency, u"PASS"
865 except (IndexError, ValueError):
868 return latency, u"FAIL"
870 def visit_suite(self, suite):
871 """Implements traversing through the suite and its direct children.
873 :param suite: Suite to process.
877 if self.start_suite(suite) is not False:
878 suite.suites.visit(self)
879 suite.tests.visit(self)
880 self.end_suite(suite)
882 def start_suite(self, suite):
883 """Called when suite starts.
885 :param suite: Suite to process.
891 parent_name = suite.parent.name
892 except AttributeError:
895 doc_str = suite.doc.\
896 replace(u'"', u"'").\
897 replace(u'\n', u' ').\
898 replace(u'\r', u'').\
899 replace(u'*[', u' |br| *[').\
900 replace(u"*", u"**").\
901 replace(u' |br| *[', u'*[', 1)
903 self._data[u"suites"][suite.longname.lower().
905 replace(u" ", u"_")] = {
906 u"name": suite.name.lower(),
908 u"parent": parent_name,
909 u"level": len(suite.longname.split(u"."))
912 suite.keywords.visit(self)
914 def end_suite(self, suite):
915 """Called when suite ends.
917 :param suite: Suite to process.
922 def visit_test(self, test):
923 """Implements traversing through the test.
925 :param test: Test to process.
929 if self.start_test(test) is not False:
930 test.keywords.visit(self)
933 def start_test(self, test):
934 """Called when test starts.
936 :param test: Test to process.
941 self._sh_run_counter = 0
943 longname_orig = test.longname.lower()
945 # Check the ignore list
946 if longname_orig in self._ignore:
949 tags = [str(tag) for tag in test.tags]
952 # Change the TC long name and name if defined in the mapping table
953 longname = self._mapping.get(longname_orig, None)
954 if longname is not None:
955 name = longname.split(u'.')[-1]
957 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
961 longname = longname_orig
962 name = test.name.lower()
964 # Remove TC number from the TC long name (backward compatibility):
965 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
966 # Remove TC number from the TC name (not needed):
967 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
969 test_result[u"parent"] = test.parent.name.lower()
970 test_result[u"tags"] = tags
971 test_result["doc"] = test.doc.\
972 replace(u'"', u"'").\
973 replace(u'\n', u' ').\
974 replace(u'\r', u'').\
975 replace(u'[', u' |br| [').\
976 replace(u' |br| [', u'[', 1)
977 test_result[u"type"] = u"FUNC"
978 test_result[u"status"] = test.status
980 if test.status == u"PASS":
981 if u"NDRPDR" in tags:
982 test_result[u"msg"] = self._get_data_from_perf_test_msg(
983 test.message).replace(u'\n', u' |br| ').\
984 replace(u'\r', u'').replace(u'"', u"'")
985 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
986 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
987 test.message).replace(u'\n', u' |br| ').\
988 replace(u'\r', u'').replace(u'"', u"'")
990 test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
991 replace(u'\r', u'').replace(u'"', u"'")
993 test_result[u"msg"] = u"Test Failed."
995 if u"PERFTEST" in tags:
996 # Replace info about cores (e.g. -1c-) with the info about threads
997 # and cores (e.g. -1t1c-) in the long test case names and in the
998 # test case names if necessary.
999 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
1003 for tag in test_result[u"tags"]:
1004 groups = re.search(self.REGEX_TC_TAG, tag)
1010 self._test_id = re.sub(
1011 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1012 self._test_id, count=1
1014 test_result[u"name"] = re.sub(
1015 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1016 test_result["name"], count=1
1019 test_result[u"status"] = u"FAIL"
1020 self._data[u"tests"][self._test_id] = test_result
1022 f"The test {self._test_id} has no or more than one "
1023 f"multi-threading tags.\n"
1024 f"Tags: {test_result[u'tags']}"
1028 if test.status == u"PASS":
1029 if u"NDRPDR" in tags:
1030 test_result[u"type"] = u"NDRPDR"
1031 test_result[u"throughput"], test_result[u"status"] = \
1032 self._get_ndrpdr_throughput(test.message)
1033 test_result[u"latency"], test_result[u"status"] = \
1034 self._get_ndrpdr_latency(test.message)
1035 elif u"SOAK" in tags:
1036 test_result[u"type"] = u"SOAK"
1037 test_result[u"throughput"], test_result[u"status"] = \
1038 self._get_plr_throughput(test.message)
1039 elif u"TCP" in tags:
1040 test_result[u"type"] = u"TCP"
1041 groups = re.search(self.REGEX_TCP, test.message)
1042 test_result[u"result"] = int(groups.group(2))
1043 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1045 test_result[u"type"] = u"MRR"
1047 test_result[u"type"] = u"BMRR"
1049 test_result[u"result"] = dict()
1050 groups = re.search(self.REGEX_BMRR, test.message)
1051 if groups is not None:
1052 items_str = groups.group(1)
1053 items_float = [float(item.strip()) for item
1054 in items_str.split(",")]
1055 # Use whole list in CSIT-1180.
1056 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1057 test_result[u"result"][u"receive-rate"] = stats.avg
1059 groups = re.search(self.REGEX_MRR, test.message)
1060 test_result[u"result"][u"receive-rate"] = \
1061 float(groups.group(3)) / float(groups.group(1))
1062 elif u"RECONF" in tags:
1063 test_result[u"type"] = u"RECONF"
1064 test_result[u"result"] = None
1066 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1067 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1068 test_result[u"result"] = {
1069 u"loss": int(grps_loss.group(1)),
1070 u"time": float(grps_time.group(1))
1072 except (AttributeError, IndexError, ValueError, TypeError):
1073 test_result[u"status"] = u"FAIL"
1074 elif u"DEVICETEST" in tags:
1075 test_result[u"type"] = u"DEVICETEST"
1077 test_result[u"status"] = u"FAIL"
1078 self._data[u"tests"][self._test_id] = test_result
1081 self._data[u"tests"][self._test_id] = test_result
1083 def end_test(self, test):
1084 """Called when test ends.
1086 :param test: Test to process.
1091 def visit_keyword(self, keyword):
1092 """Implements traversing through the keyword and its child keywords.
1094 :param keyword: Keyword to process.
1095 :type keyword: Keyword
1098 if self.start_keyword(keyword) is not False:
1099 self.end_keyword(keyword)
1101 def start_keyword(self, keyword):
1102 """Called when keyword starts. Default implementation does nothing.
1104 :param keyword: Keyword to process.
1105 :type keyword: Keyword
1109 if keyword.type == u"setup":
1110 self.visit_setup_kw(keyword)
1111 elif keyword.type == u"teardown":
1112 self.visit_teardown_kw(keyword)
1114 self.visit_test_kw(keyword)
1115 except AttributeError:
1118 def end_keyword(self, keyword):
1119 """Called when keyword ends. Default implementation does nothing.
1121 :param keyword: Keyword to process.
1122 :type keyword: Keyword
1126 def visit_test_kw(self, test_kw):
1127 """Implements traversing through the test keyword and its child
1130 :param test_kw: Keyword to process.
1131 :type test_kw: Keyword
1134 for keyword in test_kw.keywords:
1135 if self.start_test_kw(keyword) is not False:
1136 self.visit_test_kw(keyword)
1137 self.end_test_kw(keyword)
1139 def start_test_kw(self, test_kw):
1140 """Called when test keyword starts. Default implementation does
1143 :param test_kw: Keyword to process.
1144 :type test_kw: Keyword
1147 if test_kw.name.count(u"Show Runtime On All Duts") or \
1148 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1149 self._msg_type = u"test-show-runtime"
1150 self._sh_run_counter += 1
1151 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1152 self._msg_type = u"dpdk-version"
1155 test_kw.messages.visit(self)
1157 def end_test_kw(self, test_kw):
1158 """Called when keyword ends. Default implementation does nothing.
1160 :param test_kw: Keyword to process.
1161 :type test_kw: Keyword
1165 def visit_setup_kw(self, setup_kw):
1166 """Implements traversing through the teardown keyword and its child
1169 :param setup_kw: Keyword to process.
1170 :type setup_kw: Keyword
1173 for keyword in setup_kw.keywords:
1174 if self.start_setup_kw(keyword) is not False:
1175 self.visit_setup_kw(keyword)
1176 self.end_setup_kw(keyword)
1178 def start_setup_kw(self, setup_kw):
1179 """Called when teardown keyword starts. Default implementation does
1182 :param setup_kw: Keyword to process.
1183 :type setup_kw: Keyword
1186 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1187 and not self._version:
1188 self._msg_type = u"vpp-version"
1189 elif setup_kw.name.count(u"Set Global Variable") \
1190 and not self._timestamp:
1191 self._msg_type = u"timestamp"
1192 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1193 self._msg_type = u"testbed"
1196 setup_kw.messages.visit(self)
1198 def end_setup_kw(self, setup_kw):
1199 """Called when keyword ends. Default implementation does nothing.
1201 :param setup_kw: Keyword to process.
1202 :type setup_kw: Keyword
1206 def visit_teardown_kw(self, teardown_kw):
1207 """Implements traversing through the teardown keyword and its child
1210 :param teardown_kw: Keyword to process.
1211 :type teardown_kw: Keyword
1214 for keyword in teardown_kw.keywords:
1215 if self.start_teardown_kw(keyword) is not False:
1216 self.visit_teardown_kw(keyword)
1217 self.end_teardown_kw(keyword)
1219 def start_teardown_kw(self, teardown_kw):
1220 """Called when teardown keyword starts
1222 :param teardown_kw: Keyword to process.
1223 :type teardown_kw: Keyword
1227 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1228 # TODO: Remove when not needed:
1229 self._conf_history_lookup_nr = 0
1230 self._msg_type = u"teardown-vat-history"
1231 teardown_kw.messages.visit(self)
1232 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1233 self._conf_history_lookup_nr = 0
1234 self._msg_type = u"teardown-papi-history"
1235 teardown_kw.messages.visit(self)
1237 def end_teardown_kw(self, teardown_kw):
1238 """Called when keyword ends. Default implementation does nothing.
1240 :param teardown_kw: Keyword to process.
1241 :type teardown_kw: Keyword
1245 def visit_message(self, msg):
1246 """Implements visiting the message.
1248 :param msg: Message to process.
1252 if self.start_message(msg) is not False:
1253 self.end_message(msg)
1255 def start_message(self, msg):
1256 """Called when message starts. Get required information from messages:
1259 :param msg: Message to process.
1265 self.parse_msg[self._msg_type](msg)
1267 def end_message(self, msg):
1268 """Called when message ends. Default implementation does nothing.
1270 :param msg: Message to process.
1279 The data is extracted from output.xml files generated by Jenkins jobs and
1280 stored in pandas' DataFrames.
1286 (as described in ExecutionChecker documentation)
1288 (as described in ExecutionChecker documentation)
1290 (as described in ExecutionChecker documentation)
1293 def __init__(self, spec):
1296 :param spec: Specification.
1297 :type spec: Specification
1304 self._input_data = pd.Series()
1308 """Getter - Input data.
1310 :returns: Input data
1311 :rtype: pandas.Series
1313 return self._input_data
1315 def metadata(self, job, build):
1316 """Getter - metadata
1318 :param job: Job which metadata we want.
1319 :param build: Build which metadata we want.
1323 :rtype: pandas.Series
1326 return self.data[job][build][u"metadata"]
1328 def suites(self, job, build):
1331 :param job: Job which suites we want.
1332 :param build: Build which suites we want.
1336 :rtype: pandas.Series
1339 return self.data[job][str(build)][u"suites"]
1341 def tests(self, job, build):
1344 :param job: Job which tests we want.
1345 :param build: Build which tests we want.
1349 :rtype: pandas.Series
1352 return self.data[job][build][u"tests"]
def _parse_tests(self, job, build, log):
    """Process data from robot output.xml file and return JSON structured
    data.

    :param job: The name of job which build output data will be processed.
    :param build: The build which output data will be processed.
    :param log: List of log messages.
    :type job: str
    :type build: dict
    :type log: list of tuples (severity, msg)
    :returns: JSON data structure, None on parse failure.
    :rtype: dict or None
    """
    metadata = {
        u"job": job,
        u"build": build
    }

    with open(build[u"file-name"], u'r') as data_file:
        try:
            result = ExecutionResult(data_file)
        except errors.DataError as err:
            # Damaged / incomplete output.xml: report and skip the build.
            log.append(
                (u"ERROR", f"Error occurred while parsing output.xml: "
                           f"{repr(err)}")
            )
            return None

    # NOTE(review): third ExecutionChecker argument reconstructed as
    # self._cfg.ignore from the surrounding project - confirm.
    checker = ExecutionChecker(metadata, self._cfg.mapping,
                               self._cfg.ignore)
    result.visit(checker)

    return checker.data
def _download_and_parse_build(self, job, build, repeat, pid=10000):
    """Download and parse the input data file.

    :param job: Name of the Jenkins job which generated the processed input
        file.
    :param build: Information about the Jenkins build which generated the
        processed input file.
    :param repeat: Repeat the download specified number of times if not
        successful.
    :param pid: PID of the process executing this method.
    :type job: str
    :type build: dict
    :type repeat: int
    :type pid: int
    :returns: Processing result: parsed data, state, job and build info.
    :rtype: dict
    """

    logs = list()
    logs.append(
        (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
    )

    state = u"failed"
    success = False
    data = None

    # Retry the download up to `repeat` times.
    attempts = repeat
    while attempts:
        success = download_and_unzip_data_file(self._cfg, job, build, pid,
                                               logs)
        if success:
            break
        attempts -= 1

    if not success:
        logs.append(
            (u"ERROR",
             f"It is not possible to download the input data file from the "
             f"job {job}, build {build[u'build']}, or it is damaged. "
             f"Skipped.")
        )
    if success:
        logs.append(
            (u"INFO",
             f"    Processing data from the build {build[u'build']} ...")
        )
        data = self._parse_tests(job, build, logs)
        if data is None:
            logs.append(
                (u"ERROR",
                 f"Input data file from the job {job}, build "
                 f"{build[u'build']} is damaged. Skipped.")
            )
        else:
            state = u"processed"

        try:
            remove(build[u"file-name"])
        except OSError as err:
            logs.append(
                (u"ERROR", f"Cannot remove the file {build[u'file-name']}: "
                           f"{repr(err)}")
            )

    # If the time-period is defined in the specification file, remove all
    # files which are outside the time period.
    timeperiod = self._cfg.input.get(u"time-period", None)
    if timeperiod and data:
        now = dt.utcnow()
        timeperiod = timedelta(int(timeperiod))
        metadata = data.get(u"metadata", None)
        if metadata:
            generated = metadata.get(u"generated", None)
            if generated:
                generated = dt.strptime(generated, u"%Y%m%d %H:%M")
                if (now - generated) > timeperiod:
                    # Remove the data and the file:
                    state = u"removed"
                    data = None
                    logs.append(
                        (u"INFO",
                         f"    The build {job}/{build[u'build']} is "
                         f"outdated, will be removed.")
                    )
    logs.append((u"INFO", u"  Done."))

    # Emit the collected per-build log lines at their recorded severity;
    # lines with an unknown severity are dropped, as in the original flow.
    emitters = {
        u"INFO": logging.info,
        u"ERROR": logging.error,
        u"DEBUG": logging.debug,
        u"CRITICAL": logging.critical,
        u"WARNING": logging.warning
    }
    for level, line in logs:
        if level in emitters:
            emitters[level](line)

    return {u"data": data, u"state": state, u"job": job, u"build": build}
def download_and_parse_data(self, repeat=1):
    """Download the input data files, parse input data from input files and
    store in pandas' Series.

    :param repeat: Repeat the download specified number of times if not
        successful.
    :type repeat: int
    """

    logging.info(u"Downloading and parsing input files ...")

    def as_series(mapping):
        # Re-pack a plain dict into a Series preserving key order.
        return pd.Series(
            list(mapping.values()), index=list(mapping.keys())
        )

    for job, builds in self._cfg.builds.items():
        for build in builds:

            result = self._download_and_parse_build(job, build, repeat)
            build_nr = result[u"build"][u"build"]

            if result[u"data"]:
                data = result[u"data"]
                build_data = pd.Series({
                    u"metadata": as_series(data[u"metadata"]),
                    u"suites": as_series(data[u"suites"]),
                    u"tests": as_series(data[u"tests"])
                })

                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                # Builds are keyed by the string form of the build number.
                self._input_data[job][str(build_nr)] = build_data

                self._cfg.set_input_file_name(
                    job, build_nr, result[u"build"][u"file-name"])

            self._cfg.set_input_state(job, build_nr, result[u"state"])

            # ru_maxrss is in kB on Linux; report the peak RSS in MB.
            mem_alloc = \
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
            logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

    logging.info(u"Done.")
1536 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1537 """Return the index of character in the string which is the end of tag.
1539 :param tag_filter: The string where the end of tag is being searched.
1540 :param start: The index where the searching is stated.
1541 :param closer: The character which is the tag closer.
1542 :type tag_filter: str
1545 :returns: The index of the tag closer.
1550 idx_opener = tag_filter.index(closer, start)
1551 return tag_filter.index(closer, idx_opener + 1)
@staticmethod
def _condition(tag_filter):
    """Create a conditional statement from the given tag filter.

    Every quoted tag ('TAG') in the filter is rewritten to a membership
    test ('TAG' in tags) so the whole expression can later be evaluated
    against a collection of test tags.

    :param tag_filter: Filter based on tags from the element specification.
    :type tag_filter: str
    :returns: Conditional statement which can be evaluated.
    :rtype: str
    """
    idx = 0
    while True:
        idx = InputData._end_of_tag(tag_filter, idx)
        if idx is None:
            return tag_filter
        idx += 1
        tag_filter = f"{tag_filter[:idx]} in tags{tag_filter[idx:]}"
def filter_data(self, element, params=None, data=None, data_set=u"tests",
                continue_on_error=False):
    """Filter required data from the given jobs and builds.

    The output data structure is:
    - job 1
      - build 1
        - test (or suite) 1 ID:
          - param 1
          - param 2
          ...
        - test (or suite) n ID:
        ...
      ...
    ...

    :param element: Element which will use the filtered data.
    :param params: Parameters which will be included in the output. If None,
        all parameters are included.
    :param data: If not None, this data is used instead of data specified
        in the element.
    :param data_set: The set of data to be filtered: tests, suites,
        metadata.
    :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then.
    :type element: pandas.Series
    :type params: list
    :type data: dict
    :type data_set: str
    :type continue_on_error: bool
    :returns: Filtered data.
    :rtype pandas.Series
    """

    try:
        if data_set == "suites":
            cond = u"True"
        elif element[u"filter"] in (u"all", u"template"):
            cond = u"True"
        else:
            cond = InputData._condition(element[u"filter"])
        logging.debug(f"   Filter: {cond}")
    except KeyError:
        logging.error(u"  No filter defined.")
        return None

    if params is None:
        params = element.get(u"parameters", None)
        if params:
            params.append(u"type")

    data_to_filter = data if data else element[u"data"]
    filtered = pd.Series()
    try:
        for job, builds in data_to_filter.items():
            filtered[job] = pd.Series()
            for build in builds:
                build_key = str(build)
                filtered[job][build_key] = pd.Series()
                try:
                    data_dict = dict(
                        self.data[job][build_key][data_set].items())
                except KeyError:
                    if continue_on_error:
                        continue
                    return None

                for test_id, test_data in data_dict.items():
                    # NOTE(review): cond originates from the specification
                    # file and is eval()-ed here; the spec is trusted,
                    # operator-controlled input.
                    if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                        filtered[job][build_key][test_id] = pd.Series()
                        if params is None:
                            for param, val in test_data.items():
                                filtered[job][build_key][test_id][param] = val
                        else:
                            for param in params:
                                try:
                                    filtered[job][build_key][test_id][param] = \
                                        test_data[param]
                                except KeyError:
                                    filtered[job][build_key][test_id][param] = \
                                        u"No Data"
        return filtered

    except (KeyError, IndexError, ValueError) as err:
        logging.error(
            f"Missing mandatory parameter in the element specification: "
            f"{repr(err)}"
        )
        return None
    except AttributeError as err:
        logging.error(repr(err))
        return None
    except SyntaxError as err:
        logging.error(
            f"The filter {cond} is not correct. Check if all tags are "
            f"enclosed by apostrophes.\n{repr(err)}"
        )
        return None
def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                         continue_on_error=False):
    """Filter required data from the given jobs and builds.

    The output data structure is:
    - job 1
      - build 1
        - test (or suite) 1 ID:
          - param 1
          - param 2
          ...
        - test (or suite) n ID:
        ...
      ...
    ...

    :param element: Element which will use the filtered data.
    :param params: Parameters which will be included in the output. If None,
        all parameters are included.
    :param data_set: The set of data to be filtered: tests, suites,
        metadata.
    :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then.
    :type element: pandas.Series
    :type params: list
    :type data_set: str
    :type continue_on_error: bool
    :returns: Filtered data.
    :rtype pandas.Series
    """

    include = element.get(u"include", None)
    if not include:
        logging.warning(u"No tests to include, skipping the element.")
        return None

    if params is None:
        params = element.get(u"parameters", None)
        if params:
            params.append(u"type")

    filtered = pd.Series()
    try:
        for job, builds in element[u"data"].items():
            filtered[job] = pd.Series()
            for build in builds:
                build_key = str(build)
                filtered[job][build_key] = pd.Series()
                for test in include:
                    try:
                        # Each "include" entry is a regex matched against
                        # lower-cased test IDs.
                        pattern = re.compile(str(test).lower())
                        available = self.data[job][build_key][data_set]
                        for test_id in available.keys():
                            if re.match(pattern, str(test_id).lower()):
                                test_data = available[test_id]
                                filtered[job][build_key][test_id] = pd.Series()
                                if params is None:
                                    for param, val in test_data.items():
                                        filtered[job][build_key][test_id]\
                                            [param] = val
                                else:
                                    for param in params:
                                        try:
                                            filtered[job][build_key][
                                                test_id][param] = \
                                                test_data[param]
                                        except KeyError:
                                            filtered[job][build_key][
                                                test_id][param] = u"No Data"
                    except KeyError as err:
                        logging.error(repr(err))
                        if continue_on_error:
                            continue
                        return None
        return filtered

    except (KeyError, IndexError, ValueError) as err:
        logging.error(
            f"Missing mandatory parameter in the element "
            f"specification: {repr(err)}"
        )
        return None
    except AttributeError as err:
        logging.error(repr(err))
        return None
1768 def merge_data(data):
1769 """Merge data from more jobs and builds to a simple data structure.
1771 The output data structure is:
1773 - test (suite) 1 ID:
1779 - test (suite) n ID:
1782 :param data: Data to merge.
1783 :type data: pandas.Series
1784 :returns: Merged data.
1785 :rtype: pandas.Series
1788 logging.info(u" Merging data ...")
1790 merged_data = pd.Series()
1791 for builds in data.values:
1792 for item in builds.values:
1793 for item_id, item_data in item.items():
1794 merged_data[item_id] = item_data
def print_all_oper_data(self):
    """Print all operational data to console.
    """

    tbl_hdr = (
        u"Name",
        u"Nr of Vectors",
        u"Nr of Packets",
        u"Suspends",
        u"Cycles per Packet",
        u"Average Vector Size"
    )

    for job in self._input_data.values:
        for build in job.values:
            for test_id, test_data in build[u"tests"].items():
                print(f"{test_id}")
                if test_data.get(u"show-run", None) is None:
                    continue
                for dut_name, data in test_data[u"show-run"].items():
                    if data.get(u"threads", None) is None:
                        continue
                    print(f"Host IP: {data.get(u'host', '')}, "
                          f"Socket: {data.get(u'socket', '')}")
                    for thread_nr, thread in data[u"threads"].items():
                        txt_table = prettytable.PrettyTable(tbl_hdr)
                        # NOTE(review): avg assumed to accumulate the last
                        # column (vector size) of each row - confirm
                        # against the show-run data layout.
                        avg = 0.0
                        for row in thread:
                            txt_table.add_row(row)
                            avg += row[-1]
                        if len(thread) == 0:
                            avg = u""
                        else:
                            avg = f", Average Vector Size per Node: " \
                                  f"{(avg / len(thread)):.2f}"
                        th_name = u"main" if thread_nr == 0 \
                            else f"worker_{thread_nr}"
                        print(f"{dut_name}, {th_name}{avg}")
                        txt_table.float_format = u".2"
                        txt_table.align = u"r"
                        txt_table.align[u"Name"] = u"l"
                        print(f"{txt_table.get_string()}\n")