1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
38 from robot.api import ExecutionResult, ResultVisitor
39 from robot import errors
41 from resources.libraries.python import jumpavg
42 from input_data_files import download_and_unzip_data_file
45 # Separator used in file names
49 class ExecutionChecker(ResultVisitor):
# NOTE(review): this capture elides many original lines (gaps in the embedded
# numbering) — the docstring below and several regex definitions are partial.
50 """Class to traverse through the test suite structure.
52 The functionality implemented in this class generates a json structure:
58 "generated": "Timestamp",
59 "version": "SUT version",
60 "job": "Jenkins job name",
61 "build": "Information about the build"
64 "Suite long name 1": {
66 "doc": "Suite 1 documentation",
67 "parent": "Suite 1 parent",
68 "level": "Level of the suite in the suite hierarchy"
70 "Suite long name N": {
72 "doc": "Suite N documentation",
73 "parent": "Suite 2 parent",
74 "level": "Level of the suite in the suite hierarchy"
81 "parent": "Name of the parent of the test",
82 "doc": "Test documentation",
83 "msg": "Test message",
84 "conf-history": "DUT1 and DUT2 VAT History",
85 "show-run": "Show Run",
86 "tags": ["tag 1", "tag 2", "tag n"],
88 "status": "PASS" | "FAIL",
134 "parent": "Name of the parent of the test",
135 "doc": "Test documentation",
136 "msg": "Test message",
137 "tags": ["tag 1", "tag 2", "tag n"],
139 "status": "PASS" | "FAIL",
146 "parent": "Name of the parent of the test",
147 "doc": "Test documentation",
148 "msg": "Test message",
149 "tags": ["tag 1", "tag 2", "tag n"],
150 "type": "MRR" | "BMRR",
151 "status": "PASS" | "FAIL",
153 "receive-rate": float,
154 # Average of a list, computed using AvgStdevStats.
155 # In CSIT-1180, replace with List[float].
169 "metadata": {  # Optional
170 "version": "VPP version",
171 "job": "Jenkins job name",
172 "build": "Information about the build"
176 "doc": "Suite 1 documentation",
177 "parent": "Suite 1 parent",
178 "level": "Level of the suite in the suite hierarchy"
181 "doc": "Suite N documentation",
182 "parent": "Suite 2 parent",
183 "level": "Level of the suite in the suite hierarchy"
189 "parent": "Name of the parent of the test",
190 "doc": "Test documentation"
191 "msg": "Test message"
192 "tags": ["tag 1", "tag 2", "tag n"],
193 "conf-history": "DUT1 and DUT2 VAT History"
194 "show-run": "Show Run"
195 "status": "PASS" | "FAIL"
203 .. note:: ID is the lowercase full path to the test.
# Class-level compiled regexes used to parse measurement results, versions
# and test-case names out of Robot Framework message strings.
# PLRsearch (SOAK) lower/upper bound throughput values.
206 REGEX_PLR_RATE = re.compile(
207 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
208 r'PLRsearch upper bound::?\s(\d+.\d+)'
# NDR/PDR lower/upper throughput values (4 capture groups).
210 REGEX_NDRPDR_RATE = re.compile(
211 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
212 r'NDR_UPPER:\s(\d+.\d+).*\n'
213 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
214 r'PDR_UPPER:\s(\d+.\d+)'
# 10 capture groups: NDR/PDR lower bounds + latencies at 90/50/10% PDR.
216 REGEX_PERF_MSG_INFO = re.compile(
217 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
218 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
219 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
220 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
221 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
223 # TODO: Remove when not needed
224 REGEX_NDRPDR_LAT_BASE = re.compile(
225 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
226 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
228 REGEX_NDRPDR_LAT = re.compile(
229 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
230 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
231 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
232 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
233 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
234 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
236 # TODO: Remove when not needed
237 REGEX_NDRPDR_LAT_LONG = re.compile(
238 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
239 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
240 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
241 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
242 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
246 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
# Version extraction; group(2) carries the actual version string.
248 REGEX_VERSION_VPP = re.compile(
249 r"(return STDOUT Version:\s*|"
250 r"VPP Version:\s*|VPP version:\s*)(.*)"
252 REGEX_VERSION_DPDK = re.compile(
253 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
255 REGEX_TCP = re.compile(
256 r'Total\s(rps|cps|throughput):\s(\d*).*$'
258 REGEX_MRR = re.compile(
259 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
260 r'tx\s(\d*),\srx\s(\d*)'
262 REGEX_BMRR = re.compile(
263 r'Maximum Receive Rate trial results'
264 r' in packets per second: \[(.*)\]'
266 REGEX_RECONF_LOSS = re.compile(
267 r'Packets lost due to reconfig: (\d*)'
269 REGEX_RECONF_TIME = re.compile(
270 r'Implied time lost: (\d*.[\de-]*)'
# Test-case naming helpers: thread/core tags and tcNN- prefixes.
272 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
274 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
276 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
278 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
280 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
282 def __init__(self, metadata, mapping, ignore):
# Initializer: stores configuration, resets parser state, and wires the
# message-type -> parser-method dispatch table used by start_message().
285 :param metadata: Key-value pairs to be included in "metadata" part of
287 :param mapping: Mapping of the old names of test cases to the new
289 :param ignore: List of TCs to be ignored.
295 # Type of message to parse out from the test messages
296 self._msg_type = None
302 self._timestamp = None
304 # Testbed. The testbed is identified by TG node IP address.
307 # Mapping of TCs long names
308 self._mapping = mapping
311 self._ignore = ignore
313 # Number of PAPI History messages found:
315 # 1 - PAPI History of DUT1
316 # 2 - PAPI History of DUT2
317 self._conf_history_lookup_nr = 0
319 self._sh_run_counter = 0
321 # Test ID of currently processed test- the lowercase full path to the
325 # The main data structure
327 u"metadata": OrderedDict(),
328 u"suites": OrderedDict(),
329 u"tests": OrderedDict()
332 # Save the provided metadata
333 for key, val in metadata.items():
334 self._data[u"metadata"][key] = val
336 # Dictionary defining the methods used to parse different types of
# Dispatch table: _msg_type value -> bound parser method.
339 u"timestamp": self._get_timestamp,
340 u"vpp-version": self._get_vpp_version,
341 u"dpdk-version": self._get_dpdk_version,
342 # TODO: Remove when not needed:
343 u"teardown-vat-history": self._get_vat_history,
344 u"teardown-papi-history": self._get_papi_history,
345 u"test-show-runtime": self._get_show_run,
346 u"testbed": self._get_testbed
# NOTE(review): the @property/def line for this getter is elided in this
# capture; only the docstring body is visible.
351 """Getter - Data parsed from the XML file.
353 :returns: Data parsed from the XML file.
358 def _get_data_from_perf_test_msg(self, msg):
# Reformats an NDRPDR test message into a short human-readable summary
# (Mpps rates plus percentile latencies decoded from hdrh histograms).
# On any parsing problem the original message is returned unchanged
# (per the docstring below).
366 from message of NDRPDR performance tests.
368 :param msg: Message to be processed.
370 :returns: Processed message or original message if a problem occurs.
374 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
375 if not groups or groups.lastindex != 10:
380 u"ndr_low": float(groups.group(1)),
381 u"ndr_low_b": float(groups.group(2)),
382 u"pdr_low": float(groups.group(3)),
383 u"pdr_low_b": float(groups.group(4)),
384 u"pdr_lat_90_1": groups.group(5),
385 u"pdr_lat_90_2": groups.group(6),
386 u"pdr_lat_50_1": groups.group(7),
387 u"pdr_lat_50_2": groups.group(8),
388 u"pdr_lat_10_1": groups.group(9),
389 u"pdr_lat_10_2": groups.group(10),
391 except (AttributeError, IndexError, ValueError, KeyError):
394 def _process_lat(in_str_1, in_str_2):
# Inner helper: decodes the 4th '/'-separated field of each latency
# string as a base64 hdrh histogram and reports 50/90/99th percentiles.
395 """Extract min, avg, max values from latency string.
397 :param in_str_1: Latency string for one direction produced by robot
399 :param in_str_2: Latency string for second direction produced by
403 :returns: Processed latency string or empty string if a problem
405 :rtype: tuple(str, str)
407 in_list_1 = in_str_1.split('/', 3)
408 in_list_2 = in_str_2.split('/', 3)
410 if len(in_list_1) != 4 and len(in_list_2) != 4:
# Pad base64 to a multiple of 4 before decoding.
413 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
415 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
416 except hdrh.codec.HdrLengthException:
419 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
421 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
422 except hdrh.codec.HdrLengthException:
425 if hdr_lat_1 and hdr_lat_2:
426 hdr_lat_1_50 = hdr_lat_1.get_value_at_percentile(50.0)
427 hdr_lat_1_90 = hdr_lat_1.get_value_at_percentile(90.0)
428 hdr_lat_1_99 = hdr_lat_1.get_value_at_percentile(99.0)
429 hdr_lat_2_50 = hdr_lat_2.get_value_at_percentile(50.0)
430 hdr_lat_2_90 = hdr_lat_2.get_value_at_percentile(90.0)
431 hdr_lat_2_99 = hdr_lat_2.get_value_at_percentile(99.0)
# Only emit a summary when at least one percentile is non-zero.
433 if (hdr_lat_1_50 + hdr_lat_1_90 + hdr_lat_1_99 +
434 hdr_lat_2_50 + hdr_lat_2_90 + hdr_lat_2_99):
436 f"{hdr_lat_1_50} {hdr_lat_1_90} {hdr_lat_1_99} , "
437 f"{hdr_lat_2_50} {hdr_lat_2_90} {hdr_lat_2_99}"
443 pdr_lat_10 = _process_lat(data[u'pdr_lat_10_1'],
444 data[u'pdr_lat_10_2'])
445 pdr_lat_50 = _process_lat(data[u'pdr_lat_50_1'],
446 data[u'pdr_lat_50_2'])
447 pdr_lat_90 = _process_lat(data[u'pdr_lat_90_1'],
448 data[u'pdr_lat_90_2'])
449 pdr_lat_10 = f"\n3. {pdr_lat_10}" if pdr_lat_10 else u""
450 pdr_lat_50 = f"\n4. {pdr_lat_50}" if pdr_lat_50 else u""
451 pdr_lat_90 = f"\n5. {pdr_lat_90}" if pdr_lat_90 else u""
# Rates are converted from pps to Mpps for display (/ 1e6).
454 f"1. {(data[u'ndr_low'] / 1e6):.2f} {data[u'ndr_low_b']:.2f}"
455 f"\n2. {(data[u'pdr_low'] / 1e6):.2f} {data[u'pdr_low_b']:.2f}"
460 except (AttributeError, IndexError, ValueError, KeyError):
463 def _get_testbed(self, msg):
464 """Called when extraction of testbed IP is required.
465 The testbed is identified by TG node IP address.
467 :param msg: Message to process.
# Only TG-setup messages carry the testbed (TG node) IP address.
472 if msg.message.count(u"Setup of TG node") or \
473 msg.message.count(u"Setup of node TG host"):
474 reg_tg_ip = re.compile(
475 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
477 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
478 except (KeyError, ValueError, IndexError, AttributeError):
481 self._data[u"metadata"][u"testbed"] = self._testbed
# Reset dispatch state so further messages are not parsed as testbed.
482 self._msg_type = None
484 def _get_vpp_version(self, msg):
485 """Called when extraction of VPP version is required.
487 :param msg: Message to process.
# Accept all known version-banner variants emitted by different VPP builds.
492 if msg.message.count(u"return STDOUT Version:") or \
493 msg.message.count(u"VPP Version:") or \
494 msg.message.count(u"VPP version:"):
495 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
497 self._data[u"metadata"][u"version"] = self._version
498 self._msg_type = None
500 def _get_dpdk_version(self, msg):
501 """Called when extraction of DPDK version is required.
503 :param msg: Message to process.
508 if msg.message.count(u"DPDK Version:"):
# group(2) of REGEX_VERSION_DPDK is the version string itself.
510 self._version = str(re.search(
511 self.REGEX_VERSION_DPDK, msg.message).group(2))
512 self._data[u"metadata"][u"version"] = self._version
516 self._msg_type = None
518 def _get_timestamp(self, msg):
519 """Called when extraction of timestamp is required.
521 :param msg: Message to process.
# Keep only the leading "YYYYMMDD HH:MM" part of the Robot timestamp.
526 self._timestamp = msg.timestamp[:14]
527 self._data[u"metadata"][u"generated"] = self._timestamp
528 self._msg_type = None
530 def _get_vat_history(self, msg):
531 """Called when extraction of VAT command history is required.
533 TODO: Remove when not needed.
535 :param msg: Message to process.
539 if msg.message.count(u"VAT command history:"):
# _conf_history_lookup_nr counts DUTs; 1st hit initializes the field.
540 self._conf_history_lookup_nr += 1
541 if self._conf_history_lookup_nr == 1:
542 self._data[u"tests"][self._test_id][u"conf-history"] = str()
544 self._msg_type = None
# Strip the "<IP> VAT command history:" prefix and convert newlines to
# the |br| rst line-break markers used in the report.
545 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
546 r"VAT command history:", u"",
547 msg.message, count=1).replace(u'\n', u' |br| ').\
550 self._data[u"tests"][self._test_id][u"conf-history"] += (
551 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
554 def _get_papi_history(self, msg):
# Mirrors _get_vat_history, but for PAPI command history messages.
555 """Called when extraction of PAPI command history is required.
557 :param msg: Message to process.
561 if msg.message.count(u"PAPI command history:"):
562 self._conf_history_lookup_nr += 1
563 if self._conf_history_lookup_nr == 1:
564 self._data[u"tests"][self._test_id][u"conf-history"] = str()
566 self._msg_type = None
567 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
568 r"PAPI command history:", u"",
569 msg.message, count=1).replace(u'\n', u' |br| ').\
571 self._data[u"tests"][self._test_id][u"conf-history"] += (
572 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
575 def _get_show_run(self, msg):
576 """Called when extraction of VPP operational data (output of CLI command
577 Show Runtime) is required.
579 :param msg: Message to process.
# Only "stats runtime" messages are relevant; also cap processing at the
# first show-runtime invocation per test (_sh_run_counter guard).
584 if not msg.message.count(u"stats runtime"):
588 if self._sh_run_counter > 1:
591 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
592 self._data[u"tests"][self._test_id][u"show-run"] = dict()
# group(1)=host IP, group(2)=PAPI socket, per REGEX_TC_PAPI_CLI.
594 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
598 host = groups.group(1)
599 except (AttributeError, IndexError):
602 sock = groups.group(2)
603 except (AttributeError, IndexError):
# Normalize the python-repr-ish payload into strict JSON before loads().
606 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
607 replace(u"'", u'"').replace(u'b"', u'"').
608 replace(u'u"', u'"').split(u":", 1)[1])
611 threads_nr = len(runtime[0][u"clocks"])
612 except (IndexError, KeyError):
# DUT name derived from how many DUTs were already recorded for this test.
615 dut = u"DUT{nr}".format(
616 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
621 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
625 for idx in range(threads_nr):
# Clocks-per-unit: prefer vectors, then calls, then suspends as divisor.
626 if item[u"vectors"][idx] > 0:
627 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
628 elif item[u"calls"][idx] > 0:
629 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
630 elif item[u"suspends"][idx] > 0:
631 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
635 if item[u"calls"][idx] > 0:
636 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
# Record the node only if it did any work on this thread.
640 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
641 int(item[u"suspends"][idx]):
642 oper[u"threads"][idx].append([
645 item[u"vectors"][idx],
646 item[u"suspends"][idx],
651 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
653 def _get_ndrpdr_throughput(self, msg):
654 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
657 :param msg: The test message to be parsed.
659 :returns: Parsed data as a dict and the status (PASS/FAIL).
660 :rtype: tuple(dict, str)
# Defaults of -1.0 mark "not parsed" and are returned on failure.
664 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
665 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
668 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
670 if groups is not None:
672 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
673 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
674 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
675 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
677 except (IndexError, ValueError):
680 return throughput, status
682 def _get_plr_throughput(self, msg):
683 """Get PLRsearch lower bound and PLRsearch upper bound from the test
686 :param msg: The test message to be parsed.
688 :returns: Parsed data as a dict and the status (PASS/FAIL).
689 :rtype: tuple(dict, str)
# SOAK-test counterpart of _get_ndrpdr_throughput (single bound pair).
697 groups = re.search(self.REGEX_PLR_RATE, msg)
699 if groups is not None:
701 throughput[u"LOWER"] = float(groups.group(1))
702 throughput[u"UPPER"] = float(groups.group(2))
704 except (IndexError, ValueError):
707 return throughput, status
709 def _get_ndrpdr_latency(self, msg):
710 """Get LATENCY from the test message.
712 :param msg: The test message to be parsed.
714 :returns: Parsed data as a dict and the status (PASS/FAIL).
715 :rtype: tuple(dict, str)
# Pre-populated latency dict; each key holds per-direction defaults.
725 u"direction1": copy.copy(latency_default),
726 u"direction2": copy.copy(latency_default)
729 u"direction1": copy.copy(latency_default),
730 u"direction2": copy.copy(latency_default)
733 u"direction1": copy.copy(latency_default),
734 u"direction2": copy.copy(latency_default)
737 u"direction1": copy.copy(latency_default),
738 u"direction2": copy.copy(latency_default)
741 u"direction1": copy.copy(latency_default),
742 u"direction2": copy.copy(latency_default)
745 u"direction1": copy.copy(latency_default),
746 u"direction2": copy.copy(latency_default)
# Try message formats from newest (LONG) to oldest (BASE).
750 # TODO: Rewrite when long and base are not needed
751 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
753 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
755 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
757 return latency, u"FAIL"
759 def process_latency(in_str):
760 """Return object with parsed latency values.
762 TODO: Define class for the return type.
764 :param in_str: Input string, min/avg/max/hdrh format.
766 :returns: Dict with corresponding keys, except hdrh float values.
768 :throws IndexError: If in_str does not have enough substrings.
769 :throws ValueError: If a substring does not convert to float.
771 in_list = in_str.split('/', 3)
774 u"min": float(in_list[0]),
775 u"avg": float(in_list[1]),
776 u"max": float(in_list[2]),
# hdrh histogram blob is optional (4th field).
780 if len(in_list) == 4:
781 rval[u"hdrh"] = str(in_list[3])
# BASE format: 4 groups (NDR + PDR, two directions each).
786 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
787 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
788 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
789 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
790 if groups.lastindex == 4:
791 return latency, u"PASS"
792 except (IndexError, ValueError):
# Mid format: 12 groups adding PDR90/50/10 and LAT0.
796 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
797 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
798 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
799 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
800 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
801 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
802 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
803 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
804 if groups.lastindex == 12:
805 return latency, u"PASS"
806 except (IndexError, ValueError):
# LONG format: 18 groups adding NDR10/50/90 percentile latencies.
809 # TODO: Remove when not needed
810 latency[u"NDR10"] = {
811 u"direction1": copy.copy(latency_default),
812 u"direction2": copy.copy(latency_default)
814 latency[u"NDR50"] = {
815 u"direction1": copy.copy(latency_default),
816 u"direction2": copy.copy(latency_default)
818 latency[u"NDR90"] = {
819 u"direction1": copy.copy(latency_default),
820 u"direction2": copy.copy(latency_default)
823 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
824 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
825 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
826 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
827 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
828 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
829 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
830 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
831 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
832 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
833 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
834 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
835 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
836 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
837 return latency, u"PASS"
838 except (IndexError, ValueError):
841 return latency, u"FAIL"
843 def visit_suite(self, suite):
844 """Implements traversing through the suite and its direct children.
846 :param suite: Suite to process.
# start_suite may return False to veto descending into this suite.
850 if self.start_suite(suite) is not False:
851 suite.suites.visit(self)
852 suite.tests.visit(self)
853 self.end_suite(suite)
855 def start_suite(self, suite):
856 """Called when suite starts.
858 :param suite: Suite to process.
# Root suite has no parent -> AttributeError is expected and handled.
864 parent_name = suite.parent.name
865 except AttributeError:
# Normalize docs into the report's rst-flavoured markup.
868 doc_str = suite.doc.\
869 replace(u'"', u"'").\
870 replace(u'\n', u' ').\
871 replace(u'\r', u'').\
872 replace(u'*[', u' |br| *[').\
873 replace(u"*", u"**").\
874 replace(u' |br| *[', u'*[', 1)
# Suites are keyed by lowercase long name with spaces -> underscores.
876 self._data[u"suites"][suite.longname.lower().
878 replace(u" ", u"_")] = {
879 u"name": suite.name.lower(),
881 u"parent": parent_name,
882 u"level": len(suite.longname.split(u"."))
885 suite.keywords.visit(self)
887 def end_suite(self, suite):
# Intentionally a no-op hook (required by the ResultVisitor protocol).
888 """Called when suite ends.
890 :param suite: Suite to process.
895 def visit_test(self, test):
896 """Implements traversing through the test.
898 :param test: Test to process.
# start_test may return False to skip this test (e.g. ignore list).
902 if self.start_test(test) is not False:
903 test.keywords.visit(self)
906 def start_test(self, test):
907 """Called when test starts.
909 :param test: Test to process.
# Reset per-test show-runtime counter.
914 self._sh_run_counter = 0
916 longname_orig = test.longname.lower()
918 # Check the ignore list
919 if longname_orig in self._ignore:
922 tags = [str(tag) for tag in test.tags]
925 # Change the TC long name and name if defined in the mapping table
926 longname = self._mapping.get(longname_orig, None)
927 if longname is not None:
928 name = longname.split(u'.')[-1]
930 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
934 longname = longname_orig
935 name = test.name.lower()
937 # Remove TC number from the TC long name (backward compatibility):
938 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
939 # Remove TC number from the TC name (not needed):
940 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
942 test_result[u"parent"] = test.parent.name.lower()
943 test_result[u"tags"] = tags
944 test_result["doc"] = test.doc.\
945 replace(u'"', u"'").\
946 replace(u'\n', u' ').\
947 replace(u'\r', u'').\
948 replace(u'[', u' |br| [').\
949 replace(u' |br| [', u'[', 1)
950 test_result[u"msg"] = self._get_data_from_perf_test_msg(test.message).\
951 replace(u'\n', u' |br| ').\
952 replace(u'\r', u'').\
# Default type/status; refined below from tags and parsed message.
954 test_result[u"type"] = u"FUNC"
955 test_result[u"status"] = test.status
957 if u"PERFTEST" in tags:
958 # Replace info about cores (e.g. -1c-) with the info about threads
959 # and cores (e.g. -1t1c-) in the long test case names and in the
960 # test case names if necessary.
961 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
965 for tag in test_result[u"tags"]:
966 groups = re.search(self.REGEX_TC_TAG, tag)
972 self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
973 f"-{tag_tc.lower()}-",
976 test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
977 f"-{tag_tc.lower()}-",
# Ambiguous threading tags -> test is marked FAIL and recorded as-is.
981 test_result[u"status"] = u"FAIL"
982 self._data[u"tests"][self._test_id] = test_result
984 f"The test {self._test_id} has no or more than one "
985 f"multi-threading tags.\n"
986 f"Tags: {test_result[u'tags']}"
# For passing tests, parse the type-specific results from the message.
990 if test.status == u"PASS":
991 if u"NDRPDR" in tags:
992 test_result[u"type"] = u"NDRPDR"
993 test_result[u"throughput"], test_result[u"status"] = \
994 self._get_ndrpdr_throughput(test.message)
995 test_result[u"latency"], test_result[u"status"] = \
996 self._get_ndrpdr_latency(test.message)
997 elif u"SOAK" in tags:
998 test_result[u"type"] = u"SOAK"
999 test_result[u"throughput"], test_result[u"status"] = \
1000 self._get_plr_throughput(test.message)
1001 elif u"TCP" in tags:
1002 test_result[u"type"] = u"TCP"
1003 groups = re.search(self.REGEX_TCP, test.message)
1004 test_result[u"result"] = int(groups.group(2))
1005 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1007 test_result[u"type"] = u"MRR"
1009 test_result[u"type"] = u"BMRR"
1011 test_result[u"result"] = dict()
1012 groups = re.search(self.REGEX_BMRR, test.message)
1013 if groups is not None:
1014 items_str = groups.group(1)
1015 items_float = [float(item.strip()) for item
1016 in items_str.split(",")]
1017 # Use whole list in CSIT-1180.
1018 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1019 test_result[u"result"][u"receive-rate"] = stats.avg
# Legacy MRR: rate = rx packets / trial duration (group 1).
1021 groups = re.search(self.REGEX_MRR, test.message)
1022 test_result[u"result"][u"receive-rate"] = \
1023 float(groups.group(3)) / float(groups.group(1))
1024 elif u"RECONF" in tags:
1025 test_result[u"type"] = u"RECONF"
1026 test_result[u"result"] = None
1028 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1029 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1030 test_result[u"result"] = {
1031 u"loss": int(grps_loss.group(1)),
1032 u"time": float(grps_time.group(1))
1034 except (AttributeError, IndexError, ValueError, TypeError):
1035 test_result[u"status"] = u"FAIL"
1036 elif u"DEVICETEST" in tags:
1037 test_result[u"type"] = u"DEVICETEST"
1039 test_result[u"status"] = u"FAIL"
1040 self._data[u"tests"][self._test_id] = test_result
1043 self._data[u"tests"][self._test_id] = test_result
1045 def end_test(self, test):
# Intentionally a no-op hook (required by the ResultVisitor protocol).
1046 """Called when test ends.
1048 :param test: Test to process.
1053 def visit_keyword(self, keyword):
1054 """Implements traversing through the keyword and its child keywords.
1056 :param keyword: Keyword to process.
1057 :type keyword: Keyword
1060 if self.start_keyword(keyword) is not False:
1061 self.end_keyword(keyword)
1063 def start_keyword(self, keyword):
1064 """Called when keyword starts. Default implementation does nothing.
1066 :param keyword: Keyword to process.
1067 :type keyword: Keyword
# Dispatch by keyword type; non-setup/teardown keywords are treated as
# test keywords. AttributeError (no .type) is tolerated.
1071 if keyword.type == u"setup":
1072 self.visit_setup_kw(keyword)
1073 elif keyword.type == u"teardown":
1074 self.visit_teardown_kw(keyword)
1076 self.visit_test_kw(keyword)
1077 except AttributeError:
1080 def end_keyword(self, keyword):
# Intentionally a no-op hook.
1081 """Called when keyword ends. Default implementation does nothing.
1083 :param keyword: Keyword to process.
1084 :type keyword: Keyword
1088 def visit_test_kw(self, test_kw):
1089 """Implements traversing through the test keyword and its child
# Recursive walk over nested test keywords.
1092 :param test_kw: Keyword to process.
1093 :type test_kw: Keyword
1096 for keyword in test_kw.keywords:
1097 if self.start_test_kw(keyword) is not False:
1098 self.visit_test_kw(keyword)
1099 self.end_test_kw(keyword)
1101 def start_test_kw(self, test_kw):
1102 """Called when test keyword starts. Default implementation does
1105 :param test_kw: Keyword to process.
1106 :type test_kw: Keyword
# Select the parser (_msg_type) for messages under this keyword.
1109 if test_kw.name.count(u"Show Runtime On All Duts") or \
1110 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1111 self._msg_type = u"test-show-runtime"
1112 self._sh_run_counter += 1
1113 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1114 self._msg_type = u"dpdk-version"
1117 test_kw.messages.visit(self)
1119 def end_test_kw(self, test_kw):
# Intentionally a no-op hook.
1120 """Called when keyword ends. Default implementation does nothing.
1122 :param test_kw: Keyword to process.
1123 :type test_kw: Keyword
1127 def visit_setup_kw(self, setup_kw):
# NOTE(review): docstring says "teardown" but this walks setup keywords.
1128 """Implements traversing through the teardown keyword and its child
1131 :param setup_kw: Keyword to process.
1132 :type setup_kw: Keyword
1135 for keyword in setup_kw.keywords:
1136 if self.start_setup_kw(keyword) is not False:
1137 self.visit_setup_kw(keyword)
1138 self.end_setup_kw(keyword)
1140 def start_setup_kw(self, setup_kw):
1141 """Called when teardown keyword starts. Default implementation does
1144 :param setup_kw: Keyword to process.
1145 :type setup_kw: Keyword
# Pick the message parser for setup-phase keywords; version/timestamp/
# testbed are each captured only once (guarded by "not self._...").
1148 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1149 and not self._version:
1150 self._msg_type = u"vpp-version"
1151 elif setup_kw.name.count(u"Set Global Variable") \
1152 and not self._timestamp:
1153 self._msg_type = u"timestamp"
1154 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1155 self._msg_type = u"testbed"
1158 setup_kw.messages.visit(self)
1160 def end_setup_kw(self, setup_kw):
# Intentionally a no-op hook.
1161 """Called when keyword ends. Default implementation does nothing.
1163 :param setup_kw: Keyword to process.
1164 :type setup_kw: Keyword
1168 def visit_teardown_kw(self, teardown_kw):
1169 """Implements traversing through the teardown keyword and its child
1172 :param teardown_kw: Keyword to process.
1173 :type teardown_kw: Keyword
1176 for keyword in teardown_kw.keywords:
1177 if self.start_teardown_kw(keyword) is not False:
1178 self.visit_teardown_kw(keyword)
1179 self.end_teardown_kw(keyword)
1181 def start_teardown_kw(self, teardown_kw):
1182 """Called when teardown keyword starts
1184 :param teardown_kw: Keyword to process.
1185 :type teardown_kw: Keyword
# VAT (legacy) and PAPI history keywords both reset the DUT counter
# before visiting their messages.
1189 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1190 # TODO: Remove when not needed:
1191 self._conf_history_lookup_nr = 0
1192 self._msg_type = u"teardown-vat-history"
1193 teardown_kw.messages.visit(self)
1194 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1195 self._conf_history_lookup_nr = 0
1196 self._msg_type = u"teardown-papi-history"
1197 teardown_kw.messages.visit(self)
1199 def end_teardown_kw(self, teardown_kw):
# Intentionally a no-op hook.
1200 """Called when keyword ends. Default implementation does nothing.
1202 :param teardown_kw: Keyword to process.
1203 :type teardown_kw: Keyword
1207 def visit_message(self, msg):
1208 """Implements visiting the message.
1210 :param msg: Message to process.
1214 if self.start_message(msg) is not False:
1215 self.end_message(msg)
1217 def start_message(self, msg):
1218 """Called when message starts. Get required information from messages:
1221 :param msg: Message to process.
# Dispatch to the parser selected by the current _msg_type.
1227 self.parse_msg[self._msg_type](msg)
1229 def end_message(self, msg):
# Intentionally a no-op hook.
1230 """Called when message ends. Default implementation does nothing.
1232 :param msg: Message to process.
# NOTE(review): the `class InputData:` header line is elided in this capture;
# the fragment below is the class docstring.
1241 The data is extracted from output.xml files generated by Jenkins jobs and
1242 stored in pandas' DataFrames.
1248 (as described in ExecutionChecker documentation)
1250 (as described in ExecutionChecker documentation)
1252 (as described in ExecutionChecker documentation)
1255 def __init__(self, spec):
1258 :param spec: Specification.
1259 :type spec: Specification
# All parsed job/build data is accumulated in a pandas Series.
1266 self._input_data = pd.Series()
# NOTE(review): the @property/def line for this getter is elided here.
1270 """Getter - Input data.
1272 :returns: Input data
1273 :rtype: pandas.Series
1275 return self._input_data
1277 def metadata(self, job, build):
1278 """Getter - metadata
1280 :param job: Job which metadata we want.
1281 :param build: Build which metadata we want.
1285 :rtype: pandas.Series
1288 return self.data[job][build][u"metadata"]
1290 def suites(self, job, build):
1293 :param job: Job which suites we want.
1294 :param build: Build which suites we want.
1298 :rtype: pandas.Series
# Build is coerced to str here (unlike metadata()/tests()) — keys are
# build-number strings.
1301 return self.data[job][str(build)][u"suites"]
1303 def tests(self, job, build):
1306 :param job: Job which tests we want.
1307 :param build: Build which tests we want.
1311 :rtype: pandas.Series
1314 return self.data[job][build][u"tests"]
1316 def _parse_tests(self, job, build, log):
1317 """Process data from robot output.xml file and return JSON structured
1320 :param job: The name of job which build output data will be processed.
1321 :param build: The build which output data will be processed.
1322 :param log: List of log messages.
1325 :type log: list of tuples (severity, msg)
1326 :returns: JSON data structure.
# Parse the downloaded output.xml via Robot Framework's ExecutionResult;
# parse errors are logged into `log` rather than raised.
1335 with open(build[u"file-name"], u'r') as data_file:
1337 result = ExecutionResult(data_file)
1338 except errors.DataError as err:
1340 (u"ERROR", f"Error occurred while parsing output.xml: "
# Walk the result tree with ExecutionChecker to build the data dict.
1344 checker = ExecutionChecker(metadata, self._cfg.mapping,
1346 result.visit(checker)
1350 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1351 """Download and parse the input data file.
1353 :param pid: PID of the process executing this method.
1354 :param job: Name of the Jenkins job which generated the processed input
1356 :param build: Information about the Jenkins build which generated the
1357 processed input file.
1358 :param repeat: Repeat the download specified number of times if not
1369 (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
# Download (with retries) the zipped output.xml for this job/build.
1377 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1385 f"It is not possible to download the input data file from the "
1386 f"job {job}, build {build[u'build']}, or it is damaged. "
1392 f"  Processing data from the build {build[u'build']} ...")
1394 data = self._parse_tests(job, build, logs)
1398 f"Input data file from the job {job}, build "
1399 f"{build[u'build']} is damaged. Skipped.")
1402 state = u"processed"
# Best-effort cleanup of the downloaded file.
1405 remove(build[u"file-name"])
1406 except OSError as err:
1408 ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1412 # If the time-period is defined in the specification file, remove all
1413 # files which are outside the time period.
1414 timeperiod = self._cfg.input.get(u"time-period", None)
1415 if timeperiod and data:
1417 timeperiod = timedelta(int(timeperiod))
1418 metadata = data.get(u"metadata", None)
1420 generated = metadata.get(u"generated", None)
1422 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1423 if (now - generated) > timeperiod:
1424 # Remove the data and the file:
1429 f"  The build {job}/{build[u'build']} is "
1430 f"outdated, will be removed.")
1432 logs.append((u"INFO", u"  Done."))
# Flush accumulated (severity, message) tuples to the logging module.
1434 for level, line in logs:
1435 if level == u"INFO":
1437 elif level == u"ERROR":
1439 elif level == u"DEBUG":
1441 elif level == u"CRITICAL":
1442 logging.critical(line)
1443 elif level == u"WARNING":
1444 logging.warning(line)
1446 return {u"data": data, u"state": state, u"job": job, u"build": build}
# Driver: for every job/build in the specification, download + parse the
# build and store the result in the nested pandas.Series
# self._input_data[job][str(build_nr)] = {metadata, suites, tests}.
# NOTE(review): mangled paste -- original file line numbers are fused
# into the text and some lines are elided (the guard on result[u"data"],
# closing brackets of the pd.Series literal, ``mem_alloc = ...``).
# Code left byte-identical; confirm against the original file.
1448 def download_and_parse_data(self, repeat=1):
1449 """Download the input data files, parse input data from input files and
1450 store in pandas' Series.
1452 :param repeat: Repeat the download specified number of times if not
1457 logging.info(u"Downloading and parsing input files ...")
1459 for job, builds in self._cfg.builds.items():
1460 for build in builds:
1462 result = self._download_and_parse_build(job, build, repeat)
1463 build_nr = result[u"build"][u"build"]
1466 data = result[u"data"]
# Each section (metadata/suites/tests) becomes its own pd.Series keyed
# by the dict's keys.
1467 build_data = pd.Series({
1468 u"metadata": pd.Series(
1469 list(data[u"metadata"].values()),
1470 index=list(data[u"metadata"].keys())
1472 u"suites": pd.Series(
1473 list(data[u"suites"].values()),
1474 index=list(data[u"suites"].keys())
1476 u"tests": pd.Series(
1477 list(data[u"tests"].values()),
1478 index=list(data[u"tests"].keys())
# Builds are keyed by str(build_nr) -- the getters must match this.
1482 if self._input_data.get(job, None) is None:
1483 self._input_data[job] = pd.Series()
1484 self._input_data[job][str(build_nr)] = build_data
1486 self._cfg.set_input_file_name(
1487 job, build_nr, result[u"build"][u"file-name"])
1489 self._cfg.set_input_state(job, build_nr, result[u"state"])
# ru_maxrss is reported in kB on Linux; /1000 approximates MB.
1492 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1493 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1495 logging.info(u"Done.")
1498 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1499 """Return the index of character in the string which is the end of tag.
1501 :param tag_filter: The string where the end of tag is being searched.
1502 :param start: The index where the searching is stated.
1503 :param closer: The character which is the tag closer.
1504 :type tag_filter: str
1507 :returns: The index of the tag closer.
1512 idx_opener = tag_filter.index(closer, start)
1513 return tag_filter.index(closer, idx_opener + 1)
# Rewrite a tag-filter expression so every 'TAG' literal becomes
# "'TAG' in tags", producing a string later passed to eval() by
# filter_data().
# NOTE(review): mangled paste -- original file line numbers are fused
# into the text and the surrounding loop/termination logic (the walk
# over successive tags via _end_of_tag and the final return) is elided.
# Code left byte-identical; confirm against the original file.
1518 def _condition(tag_filter):
1519 """Create a conditional statement from the given tag filter.
1521 :param tag_filter: Filter based on tags from the element specification.
1522 :type tag_filter: str
1523 :returns: Conditional statement which can be evaluated.
# _end_of_tag returns the index of the closing apostrophe (or None).
1529 index = InputData._end_of_tag(tag_filter, index)
# Splice " in tags" after the closing apostrophe of each tag literal.
1533 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
# Select tests/suites matching the element's tag filter into a nested
# pandas.Series data[job][build][test_id][param].
# NOTE(review): mangled paste -- original file line numbers are fused
# into the text and several statements are elided (the try: openers for
# the except clauses, the branches returning early for "suites"/"all"/
# "template" filters, ``data = pd.Series()`` init, the copy of selected
# params and the final return).  Code left byte-identical; confirm
# against the original file.
1535 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1536 continue_on_error=False):
1537 """Filter required data from the given jobs and builds.
1539 The output data structure is:
1543 - test (or suite) 1 ID:
1549 - test (or suite) n ID:
1556 :param element: Element which will use the filtered data.
1557 :param params: Parameters which will be included in the output. If None,
1558 all parameters are included.
1559 :param data: If not None, this data is used instead of data specified
1561 :param data_set: The set of data to be filtered: tests, suites,
1563 :param continue_on_error: Continue if there is error while reading the
1564 data. The Item will be empty then
1565 :type element: pandas.Series
1569 :type continue_on_error: bool
1570 :returns: Filtered data.
1571 :rtype pandas.Series
1575 if data_set == "suites":
1577 elif element[u"filter"] in (u"all", u"template"):
# _condition turns the tag filter into an eval()-able expression.
1580 cond = InputData._condition(element[u"filter"])
1581 logging.debug(f"   Filter: {cond}")
1583 logging.error(u"   No filter defined.")
1587 params = element.get(u"parameters", None)
1589 params.append(u"type")
# Either the explicit ``data`` argument or the element's own data spec.
1591 data_to_filter = data if data else element[u"data"]
1594 for job, builds in data_to_filter.items():
1595 data[job] = pd.Series()
1596 for build in builds:
1597 data[job][str(build)] = pd.Series()
1600 self.data[job][str(build)][data_set].items())
1602 if continue_on_error:
1606 for test_id, test_data in data_dict.items():
# NOTE(review): eval() of a specification-supplied filter string --
# acceptable only because specs are trusted input; never feed this
# untrusted data.
1607 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1608 data[job][str(build)][test_id] = pd.Series()
# params is None => copy every parameter; otherwise only the listed ones.
1610 for param, val in test_data.items():
1611 data[job][str(build)][test_id][param] = val
1613 for param in params:
1615 data[job][str(build)][test_id][param] =\
1618 data[job][str(build)][test_id][param] =\
1622 except (KeyError, IndexError, ValueError) as err:
1624 f"Missing mandatory parameter in the element specification: "
1628 except AttributeError as err:
1629 logging.error(repr(err))
# A malformed filter string surfaces as SyntaxError from eval().
1631 except SyntaxError as err:
1633 f"The filter {cond} is not correct. Check if all tags are "
1634 f"enclosed by apostrophes.\n{repr(err)}"
# Select tests whose IDs match the regexes in element[u"include"] into a
# nested pandas.Series data[job][build][test_id][param].
# NOTE(review): mangled paste -- original file line numbers are fused
# into the text and several statements are elided (the early return when
# ``include`` is empty, ``data = pd.Series()`` init, try: openers for
# the except clauses, and the final return).  Code left byte-identical;
# confirm against the original file.
1638 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1639 continue_on_error=False):
1640 """Filter required data from the given jobs and builds.
1642 The output data structure is:
1646 - test (or suite) 1 ID:
1652 - test (or suite) n ID:
1659 :param element: Element which will use the filtered data.
1660 :param params: Parameters which will be included in the output. If None,
1661 all parameters are included.
1662 :param data_set: The set of data to be filtered: tests, suites,
1664 :param continue_on_error: Continue if there is error while reading the
1665 data. The Item will be empty then
1666 :type element: pandas.Series
1669 :type continue_on_error: bool
1670 :returns: Filtered data.
1671 :rtype pandas.Series
1674 include = element.get(u"include", None)
1676 logging.warning(u"No tests to include, skipping the element.")
1680 params = element.get(u"parameters", None)
1682 params.append(u"type")
1686 for job, builds in element[u"data"].items():
1687 data[job] = pd.Series()
1688 for build in builds:
1689 data[job][str(build)] = pd.Series()
1690 for test in include:
# Each ``include`` entry is treated as a case-insensitive regex matched
# against the test IDs of this build's data set.
1692 reg_ex = re.compile(str(test).lower())
1693 for test_id in self.data[job][
1694 str(build)][data_set].keys():
1695 if re.match(reg_ex, str(test_id).lower()):
1696 test_data = self.data[job][
1697 str(build)][data_set][test_id]
1698 data[job][str(build)][test_id] = pd.Series()
# params is None => copy all parameters; else only the listed ones,
# substituting u"No Data" for missing values (see below).
1700 for param, val in test_data.items():
1701 data[job][str(build)][test_id]\
1704 for param in params:
1706 data[job][str(build)][
1710 data[job][str(build)][
1711 test_id][param] = u"No Data"
1712 except KeyError as err:
1713 logging.error(repr(err))
1714 if continue_on_error:
1719 except (KeyError, IndexError, ValueError) as err:
1721 f"Missing mandatory parameter in the element "
1722 f"specification: {repr(err)}"
1725 except AttributeError as err:
1726 logging.error(repr(err))
def merge_data(data):
    """Merge data from more jobs and builds to a simple data structure.

    The output data structure is:

    - test (suite) 1 ID:
      - param 1
      - ...
      - param n
    - ...
    - test (suite) n ID:
      - ...

    :param data: Data to merge.
    :type data: pandas.Series
    :returns: Merged data.
    :rtype: pandas.Series
    """
    logging.info(u"    Merging data ...")

    merged_data = pd.Series()
    # data is nested job -> build -> item_id -> item_data; when the same
    # item ID occurs in several builds/jobs, the later one wins.
    for builds in data.values:
        for item in builds.values:
            for item_id, item_data in item.items():
                merged_data[item_id] = item_data

    # The docstring promises the merged Series; the return statement was
    # elided in this paste and is restored here.
    return merged_data
# Dump every test's "show-run" operational data (per DUT, per thread) to
# stdout as PrettyTable tables.
# NOTE(review): mangled paste -- original file line numbers are fused
# into the text and several statements are elided (the opening of the
# ``tbl_hdr`` header list, the row construction and ``avg``
# accumulation inside the thread loop).  Code left byte-identical;
# confirm against the original file.
1760 def print_all_oper_data(self):
1761 """Print all operational data to console.
# Tail of the table-header list; its opening lines are elided here.
1769 u"Cycles per Packet",
1770 u"Average Vector Size"
1773 for job in self._input_data.values:
1774 for build in job.values:
1775 for test_id, test_data in build[u"tests"].items():
# Tests without "show-run" data are skipped.
1777 if test_data.get(u"show-run", None) is None:
1779 for dut_name, data in test_data[u"show-run"].items():
1780 if data.get(u"threads", None) is None:
1782 print(f"Host IP: {data.get(u'host', '')}, "
1783 f"Socket: {data.get(u'socket', '')}")
# One table per thread; thread 0 is the main thread, others workers.
1784 for thread_nr, thread in data[u"threads"].items():
1785 txt_table = prettytable.PrettyTable(tbl_hdr)
1788 txt_table.add_row(row)
# Guard against division by zero when a thread reports no rows.
1790 if len(thread) == 0:
1793 avg = f", Average Vector Size per Node: " \
1794 f"{(avg / len(thread)):.2f}"
1795 th_name = u"main" if thread_nr == 0 \
1796 else f"worker_{thread_nr}"
1797 print(f"{dut_name}, {th_name}{avg}")
1798 txt_table.float_format = u".2"
1799 txt_table.align = u"r"
1800 txt_table.align[u"Name"] = u"l"
1801 print(f"{txt_table.get_string()}\n")