1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
38 from robot.api import ExecutionResult, ResultVisitor
39 from robot import errors
41 from resources.libraries.python import jumpavg
42 from input_data_files import download_and_unzip_data_file
45 # Separator used in file names
49 class ExecutionChecker(ResultVisitor):
50 """Class to traverse through the test suite structure.
52 The functionality implemented in this class generates a json structure:
58 "generated": "Timestamp",
59 "version": "SUT version",
60 "job": "Jenkins job name",
61 "build": "Information about the build"
64 "Suite long name 1": {
66 "doc": "Suite 1 documentation",
67 "parent": "Suite 1 parent",
68 "level": "Level of the suite in the suite hierarchy"
70 "Suite long name N": {
72 "doc": "Suite N documentation",
73 "parent": "Suite 2 parent",
74 "level": "Level of the suite in the suite hierarchy"
81 "parent": "Name of the parent of the test",
82 "doc": "Test documentation",
83 "msg": "Test message",
84 "conf-history": "DUT1 and DUT2 VAT History",
85 "show-run": "Show Run",
86 "tags": ["tag 1", "tag 2", "tag n"],
88 "status": "PASS" | "FAIL",
134 "parent": "Name of the parent of the test",
135 "doc": "Test documentation",
136 "msg": "Test message",
137 "tags": ["tag 1", "tag 2", "tag n"],
139 "status": "PASS" | "FAIL",
146 "parent": "Name of the parent of the test",
147 "doc": "Test documentation",
148 "msg": "Test message",
149 "tags": ["tag 1", "tag 2", "tag n"],
150 "type": "MRR" | "BMRR",
151 "status": "PASS" | "FAIL",
153 "receive-rate": float,
154 # Average of a list, computed using AvgStdevStats.
155 # In CSIT-1180, replace with List[float].
169 "metadata": { # Optional
170 "version": "VPP version",
171 "job": "Jenkins job name",
172 "build": "Information about the build"
176 "doc": "Suite 1 documentation",
177 "parent": "Suite 1 parent",
178 "level": "Level of the suite in the suite hierarchy"
181 "doc": "Suite N documentation",
182 "parent": "Suite 2 parent",
183 "level": "Level of the suite in the suite hierarchy"
189 "parent": "Name of the parent of the test",
190 "doc": "Test documentation"
191 "msg": "Test message"
192 "tags": ["tag 1", "tag 2", "tag n"],
193 "conf-history": "DUT1 and DUT2 VAT History"
194 "show-run": "Show Run"
195 "status": "PASS" | "FAIL"
203 .. note:: ID is the lowercase full path to the test.
206 REGEX_PLR_RATE = re.compile(
207 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
208 r'PLRsearch upper bound::?\s(\d+.\d+)'
210 REGEX_NDRPDR_RATE = re.compile(
211 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
212 r'NDR_UPPER:\s(\d+.\d+).*\n'
213 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
214 r'PDR_UPPER:\s(\d+.\d+)'
216 REGEX_PERF_MSG_INFO = re.compile(
217 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
218 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
219 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
220 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
221 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
223 REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
225 # TODO: Remove when not needed
226 REGEX_NDRPDR_LAT_BASE = re.compile(
227 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
228 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
230 REGEX_NDRPDR_LAT = re.compile(
231 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
232 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
233 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
234 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
235 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
236 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
238 # TODO: Remove when not needed
239 REGEX_NDRPDR_LAT_LONG = re.compile(
240 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
241 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
242 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
246 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
247 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
248 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
250 REGEX_VERSION_VPP = re.compile(
251 r"(return STDOUT Version:\s*|"
252 r"VPP Version:\s*|VPP version:\s*)(.*)"
254 REGEX_VERSION_DPDK = re.compile(
255 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
257 REGEX_TCP = re.compile(
258 r'Total\s(rps|cps|throughput):\s(\d*).*$'
260 REGEX_MRR = re.compile(
261 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
262 r'tx\s(\d*),\srx\s(\d*)'
264 REGEX_BMRR = re.compile(
265 r'Maximum Receive Rate trial results'
266 r' in packets per second: \[(.*)\]'
268 REGEX_RECONF_LOSS = re.compile(
269 r'Packets lost due to reconfig: (\d*)'
271 REGEX_RECONF_TIME = re.compile(
272 r'Implied time lost: (\d*.[\de-]*)'
274 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
276 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
278 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
280 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
282 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
284 def __init__(self, metadata, mapping, ignore):
287 :param metadata: Key-value pairs to be included in "metadata" part of
289 :param mapping: Mapping of the old names of test cases to the new
291 :param ignore: List of TCs to be ignored.
297 # Type of message to parse out from the test messages
298 self._msg_type = None
304 self._timestamp = None
306 # Testbed. The testbed is identified by TG node IP address.
309 # Mapping of TCs long names
310 self._mapping = mapping
313 self._ignore = ignore
315 # Number of PAPI History messages found:
317 # 1 - PAPI History of DUT1
318 # 2 - PAPI History of DUT2
319 self._conf_history_lookup_nr = 0
321 self._sh_run_counter = 0
323 # Test ID of currently processed test- the lowercase full path to the
327 # The main data structure
329 u"metadata": OrderedDict(),
330 u"suites": OrderedDict(),
331 u"tests": OrderedDict()
334 # Save the provided metadata
335 for key, val in metadata.items():
336 self._data[u"metadata"][key] = val
338 # Dictionary defining the methods used to parse different types of
341 u"timestamp": self._get_timestamp,
342 u"vpp-version": self._get_vpp_version,
343 u"dpdk-version": self._get_dpdk_version,
344 # TODO: Remove when not needed:
345 u"teardown-vat-history": self._get_vat_history,
346 u"teardown-papi-history": self._get_papi_history,
347 u"test-show-runtime": self._get_show_run,
348 u"testbed": self._get_testbed
353 """Getter - Data parsed from the XML file.
355 :returns: Data parsed from the XML file.
360 def _get_data_from_mrr_test_msg(self, msg):
361 """Get info from message of MRR performance tests.
363 :param msg: Message to be processed.
365 :returns: Processed message or original message if a problem occurs.
369 groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
370 if not groups or groups.lastindex != 1:
374 data = groups.group(1).split(u", ")
375 except (AttributeError, IndexError, ValueError, KeyError):
381 out_str += f"{(float(item) / 1e6):.2f}, "
382 return out_str[:-2] + u"]"
383 except (AttributeError, IndexError, ValueError, KeyError):
386 def _get_data_from_perf_test_msg(self, msg):
387 """Get info from message of NDRPDR performance tests.
389 :param msg: Message to be processed.
391 :returns: Processed message or original message if a problem occurs.
395 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
396 if not groups or groups.lastindex != 10:
401 u"ndr_low": float(groups.group(1)),
402 u"ndr_low_b": float(groups.group(2)),
403 u"pdr_low": float(groups.group(3)),
404 u"pdr_low_b": float(groups.group(4)),
405 u"pdr_lat_90_1": groups.group(5),
406 u"pdr_lat_90_2": groups.group(6),
407 u"pdr_lat_50_1": groups.group(7),
408 u"pdr_lat_50_2": groups.group(8),
409 u"pdr_lat_10_1": groups.group(9),
410 u"pdr_lat_10_2": groups.group(10),
412 except (AttributeError, IndexError, ValueError, KeyError):
415 def _process_lat(in_str_1, in_str_2):
416 """Extract min, avg, max values from latency string.
418 :param in_str_1: Latency string for one direction produced by robot
420 :param in_str_2: Latency string for second direction produced by
424 :returns: Processed latency string or empty string if a problem
426 :rtype: tuple(str, str)
428 in_list_1 = in_str_1.split('/', 3)
429 in_list_2 = in_str_2.split('/', 3)
431 if len(in_list_1) != 4 and len(in_list_2) != 4:
434 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
436 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
437 except hdrh.codec.HdrLengthException:
440 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
442 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
443 except hdrh.codec.HdrLengthException:
446 if hdr_lat_1 and hdr_lat_2:
447 hdr_lat_1_50 = hdr_lat_1.get_value_at_percentile(50.0)
448 hdr_lat_1_90 = hdr_lat_1.get_value_at_percentile(90.0)
449 hdr_lat_1_99 = hdr_lat_1.get_value_at_percentile(99.0)
450 hdr_lat_2_50 = hdr_lat_2.get_value_at_percentile(50.0)
451 hdr_lat_2_90 = hdr_lat_2.get_value_at_percentile(90.0)
452 hdr_lat_2_99 = hdr_lat_2.get_value_at_percentile(99.0)
454 if (hdr_lat_1_50 + hdr_lat_1_90 + hdr_lat_1_99 +
455 hdr_lat_2_50 + hdr_lat_2_90 + hdr_lat_2_99):
457 f"{hdr_lat_1_50} {hdr_lat_1_90} {hdr_lat_1_99} , "
458 f"{hdr_lat_2_50} {hdr_lat_2_90} {hdr_lat_2_99}"
464 pdr_lat_10 = _process_lat(data[u'pdr_lat_10_1'],
465 data[u'pdr_lat_10_2'])
466 pdr_lat_50 = _process_lat(data[u'pdr_lat_50_1'],
467 data[u'pdr_lat_50_2'])
468 pdr_lat_90 = _process_lat(data[u'pdr_lat_90_1'],
469 data[u'pdr_lat_90_2'])
470 pdr_lat_10 = f"\n3. {pdr_lat_10}" if pdr_lat_10 else u""
471 pdr_lat_50 = f"\n4. {pdr_lat_50}" if pdr_lat_50 else u""
472 pdr_lat_90 = f"\n5. {pdr_lat_90}" if pdr_lat_90 else u""
475 f"1. {(data[u'ndr_low'] / 1e6):.2f} {data[u'ndr_low_b']:.2f}"
476 f"\n2. {(data[u'pdr_low'] / 1e6):.2f} {data[u'pdr_low_b']:.2f}"
481 except (AttributeError, IndexError, ValueError, KeyError):
484 def _get_testbed(self, msg):
485 """Called when extraction of testbed IP is required.
486 The testbed is identified by TG node IP address.
488 :param msg: Message to process.
493 if msg.message.count(u"Setup of TG node") or \
494 msg.message.count(u"Setup of node TG host"):
495 reg_tg_ip = re.compile(
496 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
498 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
499 except (KeyError, ValueError, IndexError, AttributeError):
502 self._data[u"metadata"][u"testbed"] = self._testbed
503 self._msg_type = None
505 def _get_vpp_version(self, msg):
506 """Called when extraction of VPP version is required.
508 :param msg: Message to process.
513 if msg.message.count(u"return STDOUT Version:") or \
514 msg.message.count(u"VPP Version:") or \
515 msg.message.count(u"VPP version:"):
516 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
518 self._data[u"metadata"][u"version"] = self._version
519 self._msg_type = None
521 def _get_dpdk_version(self, msg):
522 """Called when extraction of DPDK version is required.
524 :param msg: Message to process.
529 if msg.message.count(u"DPDK Version:"):
531 self._version = str(re.search(
532 self.REGEX_VERSION_DPDK, msg.message).group(2))
533 self._data[u"metadata"][u"version"] = self._version
537 self._msg_type = None
def _get_timestamp(self, msg):
    """Called when extraction of timestamp is required.

    Stores the truncated Robot message timestamp ("YYYYMMDD HH:MM",
    first 14 characters) as the "generated" metadata entry and resets
    the message-type dispatch so later messages are ignored.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    self._timestamp = msg.timestamp[:14]
    self._data[u"metadata"][u"generated"] = self._timestamp
    self._msg_type = None
551 def _get_vat_history(self, msg):
552 """Called when extraction of VAT command history is required.
554 TODO: Remove when not needed.
556 :param msg: Message to process.
560 if msg.message.count(u"VAT command history:"):
561 self._conf_history_lookup_nr += 1
562 if self._conf_history_lookup_nr == 1:
563 self._data[u"tests"][self._test_id][u"conf-history"] = str()
565 self._msg_type = None
566 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
567 r"VAT command history:", u"",
568 msg.message, count=1).replace(u'\n', u' |br| ').\
571 self._data[u"tests"][self._test_id][u"conf-history"] += (
572 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
575 def _get_papi_history(self, msg):
576 """Called when extraction of PAPI command history is required.
578 :param msg: Message to process.
582 if msg.message.count(u"PAPI command history:"):
583 self._conf_history_lookup_nr += 1
584 if self._conf_history_lookup_nr == 1:
585 self._data[u"tests"][self._test_id][u"conf-history"] = str()
587 self._msg_type = None
588 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
589 r"PAPI command history:", u"",
590 msg.message, count=1).replace(u'\n', u' |br| ').\
592 self._data[u"tests"][self._test_id][u"conf-history"] += (
593 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
596 def _get_show_run(self, msg):
597 """Called when extraction of VPP operational data (output of CLI command
598 Show Runtime) is required.
600 :param msg: Message to process.
605 if not msg.message.count(u"stats runtime"):
609 if self._sh_run_counter > 1:
612 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
613 self._data[u"tests"][self._test_id][u"show-run"] = dict()
615 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
619 host = groups.group(1)
620 except (AttributeError, IndexError):
623 sock = groups.group(2)
624 except (AttributeError, IndexError):
627 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
628 replace(u"'", u'"').replace(u'b"', u'"').
629 replace(u'u"', u'"').split(u":", 1)[1])
632 threads_nr = len(runtime[0][u"clocks"])
633 except (IndexError, KeyError):
636 dut = u"DUT{nr}".format(
637 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
642 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
646 for idx in range(threads_nr):
647 if item[u"vectors"][idx] > 0:
648 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
649 elif item[u"calls"][idx] > 0:
650 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
651 elif item[u"suspends"][idx] > 0:
652 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
656 if item[u"calls"][idx] > 0:
657 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
661 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
662 int(item[u"suspends"][idx]):
663 oper[u"threads"][idx].append([
666 item[u"vectors"][idx],
667 item[u"suspends"][idx],
672 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
def _get_ndrpdr_throughput(self, msg):
    """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
    message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """
    # -1.0 marks "not found"; status stays FAIL unless all four rates parse.
    throughput = {
        u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
        u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
    }
    status = u"FAIL"
    groups = re.search(self.REGEX_NDRPDR_RATE, msg)

    if groups is not None:
        try:
            throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
            throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
            throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
            throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
            status = u"PASS"
        except (IndexError, ValueError):
            pass

    return throughput, status
def _get_plr_throughput(self, msg):
    """Get PLRsearch lower bound and PLRsearch upper bound from the test
    message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """
    # -1.0 marks "not found"; status stays FAIL unless both bounds parse.
    throughput = {
        u"LOWER": -1.0,
        u"UPPER": -1.0
    }
    status = u"FAIL"
    groups = re.search(self.REGEX_PLR_RATE, msg)

    if groups is not None:
        try:
            throughput[u"LOWER"] = float(groups.group(1))
            throughput[u"UPPER"] = float(groups.group(2))
            status = u"PASS"
        except (IndexError, ValueError):
            pass

    return throughput, status
730 def _get_ndrpdr_latency(self, msg):
731 """Get LATENCY from the test message.
733 :param msg: The test message to be parsed.
735 :returns: Parsed data as a dict and the status (PASS/FAIL).
736 :rtype: tuple(dict, str)
746 u"direction1": copy.copy(latency_default),
747 u"direction2": copy.copy(latency_default)
750 u"direction1": copy.copy(latency_default),
751 u"direction2": copy.copy(latency_default)
754 u"direction1": copy.copy(latency_default),
755 u"direction2": copy.copy(latency_default)
758 u"direction1": copy.copy(latency_default),
759 u"direction2": copy.copy(latency_default)
762 u"direction1": copy.copy(latency_default),
763 u"direction2": copy.copy(latency_default)
766 u"direction1": copy.copy(latency_default),
767 u"direction2": copy.copy(latency_default)
771 # TODO: Rewrite when long and base are not needed
772 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
774 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
776 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
778 return latency, u"FAIL"
780 def process_latency(in_str):
781 """Return object with parsed latency values.
783 TODO: Define class for the return type.
785 :param in_str: Input string, min/avg/max/hdrh format.
787 :returns: Dict with corresponding keys, except hdrh float values.
789 :throws IndexError: If in_str does not have enough substrings.
790 :throws ValueError: If a substring does not convert to float.
792 in_list = in_str.split('/', 3)
795 u"min": float(in_list[0]),
796 u"avg": float(in_list[1]),
797 u"max": float(in_list[2]),
801 if len(in_list) == 4:
802 rval[u"hdrh"] = str(in_list[3])
807 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
808 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
809 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
810 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
811 if groups.lastindex == 4:
812 return latency, u"PASS"
813 except (IndexError, ValueError):
817 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
818 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
819 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
820 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
821 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
822 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
823 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
824 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
825 if groups.lastindex == 12:
826 return latency, u"PASS"
827 except (IndexError, ValueError):
830 # TODO: Remove when not needed
831 latency[u"NDR10"] = {
832 u"direction1": copy.copy(latency_default),
833 u"direction2": copy.copy(latency_default)
835 latency[u"NDR50"] = {
836 u"direction1": copy.copy(latency_default),
837 u"direction2": copy.copy(latency_default)
839 latency[u"NDR90"] = {
840 u"direction1": copy.copy(latency_default),
841 u"direction2": copy.copy(latency_default)
844 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
845 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
846 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
847 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
848 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
849 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
850 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
851 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
852 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
853 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
854 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
855 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
856 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
857 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
858 return latency, u"PASS"
859 except (IndexError, ValueError):
862 return latency, u"FAIL"
def visit_suite(self, suite):
    """Implements traversing through the suite and its direct children.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
    # start_suite may return False to veto traversal of this subtree.
    if self.start_suite(suite) is not False:
        suite.suites.visit(self)
        suite.tests.visit(self)
        self.end_suite(suite)
876 def start_suite(self, suite):
877 """Called when suite starts.
879 :param suite: Suite to process.
885 parent_name = suite.parent.name
886 except AttributeError:
889 doc_str = suite.doc.\
890 replace(u'"', u"'").\
891 replace(u'\n', u' ').\
892 replace(u'\r', u'').\
893 replace(u'*[', u' |br| *[').\
894 replace(u"*", u"**").\
895 replace(u' |br| *[', u'*[', 1)
897 self._data[u"suites"][suite.longname.lower().
899 replace(u" ", u"_")] = {
900 u"name": suite.name.lower(),
902 u"parent": parent_name,
903 u"level": len(suite.longname.split(u"."))
906 suite.keywords.visit(self)
def end_suite(self, suite):
    """Called when suite ends. Default implementation does nothing.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
def visit_test(self, test):
    """Implements traversing through the test.

    :param test: Test to process.
    :type test: Test
    :returns: Nothing.
    """
    # start_test may return False to veto processing of this test.
    if self.start_test(test) is not False:
        test.keywords.visit(self)
        self.end_test(test)
927 def start_test(self, test):
928 """Called when test starts.
930 :param test: Test to process.
935 self._sh_run_counter = 0
937 longname_orig = test.longname.lower()
939 # Check the ignore list
940 if longname_orig in self._ignore:
943 tags = [str(tag) for tag in test.tags]
946 # Change the TC long name and name if defined in the mapping table
947 longname = self._mapping.get(longname_orig, None)
948 if longname is not None:
949 name = longname.split(u'.')[-1]
951 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
955 longname = longname_orig
956 name = test.name.lower()
958 # Remove TC number from the TC long name (backward compatibility):
959 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
960 # Remove TC number from the TC name (not needed):
961 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
963 test_result[u"parent"] = test.parent.name.lower()
964 test_result[u"tags"] = tags
965 test_result["doc"] = test.doc.\
966 replace(u'"', u"'").\
967 replace(u'\n', u' ').\
968 replace(u'\r', u'').\
969 replace(u'[', u' |br| [').\
970 replace(u' |br| [', u'[', 1)
971 test_result[u"msg"] = test.message.\
972 replace(u'\n', u' |br| ').\
973 replace(u'\r', u'').\
975 test_result[u"type"] = u"FUNC"
976 test_result[u"status"] = test.status
978 if u"PERFTEST" in tags:
979 # Replace info about cores (e.g. -1c-) with the info about threads
980 # and cores (e.g. -1t1c-) in the long test case names and in the
981 # test case names if necessary.
982 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
986 for tag in test_result[u"tags"]:
987 groups = re.search(self.REGEX_TC_TAG, tag)
993 self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
994 f"-{tag_tc.lower()}-",
997 test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
998 f"-{tag_tc.lower()}-",
1002 test_result[u"status"] = u"FAIL"
1003 self._data[u"tests"][self._test_id] = test_result
1005 f"The test {self._test_id} has no or more than one "
1006 f"multi-threading tags.\n"
1007 f"Tags: {test_result[u'tags']}"
1011 if test.status == u"PASS":
1012 if u"NDRPDR" in tags:
1013 test_result[u"msg"] = self._get_data_from_perf_test_msg(
1015 replace(u'\n', u' |br| '). \
1016 replace(u'\r', u''). \
1018 test_result[u"type"] = u"NDRPDR"
1019 test_result[u"throughput"], test_result[u"status"] = \
1020 self._get_ndrpdr_throughput(test.message)
1021 test_result[u"latency"], test_result[u"status"] = \
1022 self._get_ndrpdr_latency(test.message)
1023 elif u"SOAK" in tags:
1024 test_result[u"type"] = u"SOAK"
1025 test_result[u"throughput"], test_result[u"status"] = \
1026 self._get_plr_throughput(test.message)
1027 elif u"TCP" in tags:
1028 test_result[u"type"] = u"TCP"
1029 groups = re.search(self.REGEX_TCP, test.message)
1030 test_result[u"result"] = int(groups.group(2))
1031 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1032 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1034 replace(u'\n', u' |br| '). \
1035 replace(u'\r', u''). \
1038 test_result[u"type"] = u"MRR"
1040 test_result[u"type"] = u"BMRR"
1042 test_result[u"result"] = dict()
1043 groups = re.search(self.REGEX_BMRR, test.message)
1044 if groups is not None:
1045 items_str = groups.group(1)
1046 items_float = [float(item.strip()) for item
1047 in items_str.split(",")]
1048 # Use whole list in CSIT-1180.
1049 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1050 test_result[u"result"][u"receive-rate"] = stats.avg
1052 groups = re.search(self.REGEX_MRR, test.message)
1053 test_result[u"result"][u"receive-rate"] = \
1054 float(groups.group(3)) / float(groups.group(1))
1055 elif u"RECONF" in tags:
1056 test_result[u"type"] = u"RECONF"
1057 test_result[u"result"] = None
1059 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1060 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1061 test_result[u"result"] = {
1062 u"loss": int(grps_loss.group(1)),
1063 u"time": float(grps_time.group(1))
1065 except (AttributeError, IndexError, ValueError, TypeError):
1066 test_result[u"status"] = u"FAIL"
1067 elif u"DEVICETEST" in tags:
1068 test_result[u"type"] = u"DEVICETEST"
1070 test_result[u"status"] = u"FAIL"
1071 self._data[u"tests"][self._test_id] = test_result
1074 self._data[u"tests"][self._test_id] = test_result
def end_test(self, test):
    """Called when test ends. Default implementation does nothing.

    :param test: Test to process.
    :type test: Test
    :returns: Nothing.
    """
def visit_keyword(self, keyword):
    """Implements traversing through the keyword and its child keywords.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    # start_keyword may return False to veto processing of this keyword.
    if self.start_keyword(keyword) is not False:
        self.end_keyword(keyword)
def start_keyword(self, keyword):
    """Called when keyword starts. Dispatches to the setup / teardown /
    test keyword visitor based on the keyword type.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    try:
        if keyword.type == u"setup":
            self.visit_setup_kw(keyword)
        elif keyword.type == u"teardown":
            self.visit_teardown_kw(keyword)
        else:
            self.visit_test_kw(keyword)
    except AttributeError:
        # Keywords without a type attribute are ignored.
        pass
def end_keyword(self, keyword):
    """Called when keyword ends. Default implementation does nothing.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
def visit_test_kw(self, test_kw):
    """Implements traversing through the test keyword and its child
    keywords.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
    # Recurse depth-first; start_test_kw may veto a subtree with False.
    for keyword in test_kw.keywords:
        if self.start_test_kw(keyword) is not False:
            self.visit_test_kw(keyword)
            self.end_test_kw(keyword)
1132 def start_test_kw(self, test_kw):
1133 """Called when test keyword starts. Default implementation does
1136 :param test_kw: Keyword to process.
1137 :type test_kw: Keyword
1140 if test_kw.name.count(u"Show Runtime On All Duts") or \
1141 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1142 self._msg_type = u"test-show-runtime"
1143 self._sh_run_counter += 1
1144 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1145 self._msg_type = u"dpdk-version"
1148 test_kw.messages.visit(self)
def end_test_kw(self, test_kw):
    """Called when keyword ends. Default implementation does nothing.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
def visit_setup_kw(self, setup_kw):
    """Implements traversing through the setup keyword and its child
    keywords.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
    # Recurse depth-first; start_setup_kw may veto a subtree with False.
    for keyword in setup_kw.keywords:
        if self.start_setup_kw(keyword) is not False:
            self.visit_setup_kw(keyword)
            self.end_setup_kw(keyword)
1171 def start_setup_kw(self, setup_kw):
1172 """Called when teardown keyword starts. Default implementation does
1175 :param setup_kw: Keyword to process.
1176 :type setup_kw: Keyword
1179 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1180 and not self._version:
1181 self._msg_type = u"vpp-version"
1182 elif setup_kw.name.count(u"Set Global Variable") \
1183 and not self._timestamp:
1184 self._msg_type = u"timestamp"
1185 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1186 self._msg_type = u"testbed"
1189 setup_kw.messages.visit(self)
def end_setup_kw(self, setup_kw):
    """Called when keyword ends. Default implementation does nothing.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
def visit_teardown_kw(self, teardown_kw):
    """Implements traversing through the teardown keyword and its child
    keywords.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    # Recurse depth-first; start_teardown_kw may veto a subtree with False.
    for keyword in teardown_kw.keywords:
        if self.start_teardown_kw(keyword) is not False:
            self.visit_teardown_kw(keyword)
            self.end_teardown_kw(keyword)
def start_teardown_kw(self, teardown_kw):
    """Called when teardown keyword starts.

    Arms the VAT / PAPI command-history parser and visits the keyword
    messages when the relevant "Show ... History On All Duts" keyword
    is encountered.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    if teardown_kw.name.count(u"Show Vat History On All Duts"):
        # TODO: Remove when not needed:
        self._conf_history_lookup_nr = 0
        self._msg_type = u"teardown-vat-history"
        teardown_kw.messages.visit(self)
    elif teardown_kw.name.count(u"Show Papi History On All Duts"):
        self._conf_history_lookup_nr = 0
        self._msg_type = u"teardown-papi-history"
        teardown_kw.messages.visit(self)
def end_teardown_kw(self, teardown_kw):
    """Called when keyword ends. Default implementation does nothing.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
def visit_message(self, msg):
    """Implements visiting the message.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    # start_message may return False to veto processing of this message.
    if self.start_message(msg) is not False:
        self.end_message(msg)
1248 def start_message(self, msg):
1249 """Called when message starts. Get required information from messages:
1252 :param msg: Message to process.
1258 self.parse_msg[self._msg_type](msg)
def end_message(self, msg):
    """Called when message ends. Default implementation does nothing.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
1272 The data is extracted from output.xml files generated by Jenkins jobs and
1273 stored in pandas' DataFrames.
1279 (as described in ExecutionChecker documentation)
1281 (as described in ExecutionChecker documentation)
1283 (as described in ExecutionChecker documentation)
1286 def __init__(self, spec):
1289 :param spec: Specification.
1290 :type spec: Specification
1297 self._input_data = pd.Series()
1301 """Getter - Input data.
1303 :returns: Input data
1304 :rtype: pandas.Series
1306 return self._input_data
1308 def metadata(self, job, build):
1309 """Getter - metadata
1311 :param job: Job which metadata we want.
1312 :param build: Build which metadata we want.
1316 :rtype: pandas.Series
1319 return self.data[job][build][u"metadata"]
1321 def suites(self, job, build):
1324 :param job: Job which suites we want.
1325 :param build: Build which suites we want.
1329 :rtype: pandas.Series
1332 return self.data[job][str(build)][u"suites"]
1334 def tests(self, job, build):
1337 :param job: Job which tests we want.
1338 :param build: Build which tests we want.
1342 :rtype: pandas.Series
1345 return self.data[job][build][u"tests"]
# NOTE(review): sampled view — many body lines (metadata construction, error
# handling tail, return statement) are missing; confirm against full file.
# Parses one downloaded output.xml with Robot Framework's ExecutionResult and
# walks it with ExecutionChecker to build the JSON-like data structure.
1347 def _parse_tests(self, job, build, log):
1348 """Process data from robot output.xml file and return JSON structured
1351 :param job: The name of job which build output data will be processed.
1352 :param build: The build which output data will be processed.
1353 :param log: List of log messages.
1356 :type log: list of tuples (severity, msg)
1357 :returns: JSON data structure.
# File handle is scoped by the context manager; ExecutionResult reads it all.
1366 with open(build[u"file-name"], u'r') as data_file:
1368 result = ExecutionResult(data_file)
# Malformed output.xml is reported via the caller's log list, not raised.
1369 except errors.DataError as err:
1371 (u"ERROR", f"Error occurred while parsing output.xml: "
1375 checker = ExecutionChecker(metadata, self._cfg.mapping,
1377 result.visit(checker)
# NOTE(review): sampled view — numerous body lines are missing (branch bodies,
# several log appends, the actual data/file removal); leading numbers are
# original file line numbers. Do not edit logic from this view alone.
# Downloads one build's archived output.xml, parses it, optionally deletes the
# local file, prunes data older than the configured time period, flushes the
# collected log tuples to the logging module, and returns a result dict.
1381 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1382 """Download and parse the input data file.
1384 :param pid: PID of the process executing this method.
1385 :param job: Name of the Jenkins job which generated the processed input
1387 :param build: Information about the Jenkins build which generated the
1388 processed input file.
1389 :param repeat: Repeat the download specified number of times if not
1400 (u"INFO", f" Processing the job/build: {job}: {build[u'build']}")
1408 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1416 f"It is not possible to download the input data file from the "
1417 f"job {job}, build {build[u'build']}, or it is damaged. "
1423 f" Processing data from the build {build[u'build']} ...")
1425 data = self._parse_tests(job, build, logs)
1429 f"Input data file from the job {job}, build "
1430 f"{build[u'build']} is damaged. Skipped.")
1433 state = u"processed"
# Best-effort cleanup of the downloaded file; failure is logged, not raised.
1436 remove(build[u"file-name"])
1437 except OSError as err:
1439 ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1443 # If the time-period is defined in the specification file, remove all
1444 # files which are outside the time period.
1445 timeperiod = self._cfg.input.get(u"time-period", None)
1446 if timeperiod and data:
# time-period is interpreted as a number of days (timedelta(days)).
1448 timeperiod = timedelta(int(timeperiod))
1449 metadata = data.get(u"metadata", None)
1451 generated = metadata.get(u"generated", None)
# NOTE(review): naive datetime comparison — assumes "generated" and `now`
# share a timezone convention; TODO confirm.
1453 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1454 if (now - generated) > timeperiod:
1455 # Remove the data and the file:
1460 f" The build {job}/{build[u'build']} is "
1461 f"outdated, will be removed.")
1463 logs.append((u"INFO", u" Done."))
# Replay the buffered (severity, message) tuples through logging; buffering
# keeps per-build output contiguous when builds are processed concurrently.
1465 for level, line in logs:
1466 if level == u"INFO":
1468 elif level == u"ERROR":
1470 elif level == u"DEBUG":
1472 elif level == u"CRITICAL":
1473 logging.critical(line)
1474 elif level == u"WARNING":
1475 logging.warning(line)
1477 return {u"data": data, u"state": state, u"job": job, u"build": build}
# NOTE(review): sampled view — lines missing (e.g. the state check between
# result retrieval and data use, pd.Series closing brackets); confirm against
# the full file before editing.
# Drives _download_and_parse_build over every configured job/build and stores
# the per-build results into the nested self._input_data pandas Series.
1479 def download_and_parse_data(self, repeat=1):
1480 """Download the input data files, parse input data from input files and
1481 store in pandas' Series.
1483 :param repeat: Repeat the download specified number of times if not
1488 logging.info(u"Downloading and parsing input files ...")
1490 for job, builds in self._cfg.builds.items():
1491 for build in builds:
1493 result = self._download_and_parse_build(job, build, repeat)
1494 build_nr = result[u"build"][u"build"]
1497 data = result[u"data"]
# Rebuild each dict as an explicitly-indexed Series (keys become the index).
1498 build_data = pd.Series({
1499 u"metadata": pd.Series(
1500 list(data[u"metadata"].values()),
1501 index=list(data[u"metadata"].keys())
1503 u"suites": pd.Series(
1504 list(data[u"suites"].values()),
1505 index=list(data[u"suites"].keys())
1507 u"tests": pd.Series(
1508 list(data[u"tests"].values()),
1509 index=list(data[u"tests"].keys())
# Lazily create the per-job Series; builds are keyed by str(build_nr).
1513 if self._input_data.get(job, None) is None:
1514 self._input_data[job] = pd.Series()
1515 self._input_data[job][str(build_nr)] = build_data
# Record the downloaded file name and processing state back into the spec.
1517 self._cfg.set_input_file_name(
1518 job, build_nr, result[u"build"][u"file-name"])
1520 self._cfg.set_input_state(job, build_nr, result[u"state"])
# ru_maxrss divided by 1000: assumes the platform reports kilobytes (Linux);
# on macOS it is bytes — TODO confirm target platform.
1523 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1524 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1526 logging.info(u"Done.")
# NOTE(review): sampled view — the decorator line (presumably @staticmethod,
# since there is no self) and the except/return-fallback lines are missing.
# Given a filter string like "'TAG1' and 'TAG2'", returns the index of the
# closing apostrophe of the tag that starts at/after `start`.
1529 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1530 """Return the index of character in the string which is the end of tag.
1532 :param tag_filter: The string where the end of tag is being searched.
1533 :param start: The index where the searching is stated.
1534 :param closer: The character which is the tag closer.
1535 :type tag_filter: str
1538 :returns: The index of the tag closer.
# NOTE(review): `idx_opener` actually holds the index of the OPENING quote
# (first occurrence of `closer` from `start`) — the name is misleading.
1543 idx_opener = tag_filter.index(closer, start)
# The next occurrence after the opener is the tag's closing quote.
1544 return tag_filter.index(closer, idx_opener + 1)
# NOTE(review): sampled view — the surrounding loop/termination logic and the
# return statement are missing; presumably loops until _end_of_tag raises.
# Rewrites a tag filter such as "'A' and 'B'" into a Python boolean
# expression ("'A' in tags and 'B' in tags") for later eval() in filter_data.
1549 def _condition(tag_filter):
1550 """Create a conditional statement from the given tag filter.
1552 :param tag_filter: Filter based on tags from the element specification.
1553 :type tag_filter: str
1554 :returns: Conditional statement which can be evaluated.
1560 index = InputData._end_of_tag(tag_filter, index)
# Splice " in tags" immediately after each tag's closing apostrophe.
1564 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
# NOTE(review): sampled view — many lines are missing (branch bodies, the
# try/except framing, the return statements); do not edit logic from this
# view alone.
# Filters the parsed tests/suites of the element's jobs/builds by a
# tag-based condition, copying only the requested parameters into a nested
# pandas Series keyed job -> str(build) -> test_id -> param.
1566 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1567 continue_on_error=False):
1568 """Filter required data from the given jobs and builds.
1570 The output data structure is:
1574 - test (or suite) 1 ID:
1580 - test (or suite) n ID:
1587 :param element: Element which will use the filtered data.
1588 :param params: Parameters which will be included in the output. If None,
1589 all parameters are included.
1590 :param data: If not None, this data is used instead of data specified
1592 :param data_set: The set of data to be filtered: tests, suites,
1594 :param continue_on_error: Continue if there is error while reading the
1595 data. The Item will be empty then
1596 :type element: pandas.Series
1600 :type continue_on_error: bool
1601 :returns: Filtered data.
1602 :rtype pandas.Series
# Special-cased filters: "suites" data set, and "all"/"template" pass-alls;
# otherwise compile the tag expression via _condition.
1606 if data_set == "suites":
1608 elif element[u"filter"] in (u"all", u"template"):
1611 cond = InputData._condition(element[u"filter"])
1612 logging.debug(f" Filter: {cond}")
1614 logging.error(u" No filter defined.")
1618 params = element.get(u"parameters", None)
# "type" is always carried through when an explicit parameter list is given.
1620 params.append(u"type")
1622 data_to_filter = data if data else element[u"data"]
# NOTE(review): `data` is rebound here from the input parameter to the output
# container — confirm a fresh pd.Series() assignment exists in a missing line.
1625 for job, builds in data_to_filter.items():
1626 data[job] = pd.Series()
1627 for build in builds:
1628 data[job][str(build)] = pd.Series()
1631 self.data[job][str(build)][data_set].items())
1633 if continue_on_error:
1637 for test_id, test_data in data_dict.items():
# SECURITY NOTE(review): eval() of the spec-provided filter string; the
# globals dict only exposes `tags`, but the spec file must stay trusted.
1638 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1639 data[job][str(build)][test_id] = pd.Series()
# params is None => copy every parameter; otherwise copy the listed ones.
1641 for param, val in test_data.items():
1642 data[job][str(build)][test_id][param] = val
1644 for param in params:
1646 data[job][str(build)][test_id][param] =\
1649 data[job][str(build)][test_id][param] =\
1653 except (KeyError, IndexError, ValueError) as err:
1655 f"Missing mandatory parameter in the element specification: "
1659 except AttributeError as err:
1660 logging.error(repr(err))
# SyntaxError comes from eval-ing a malformed tag expression.
1662 except SyntaxError as err:
1664 f"The filter {cond} is not correct. Check if all tags are "
1665 f"enclosed by apostrophes.\n{repr(err)}"
# NOTE(review): sampled view — missing lines include the early returns, the
# try/except framing and the final return; do not edit logic from this view.
# Like filter_data, but selects tests whose IDs match any of the regular
# expressions listed in element["include"] instead of a tag condition.
1669 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1670 continue_on_error=False):
1671 """Filter required data from the given jobs and builds.
1673 The output data structure is:
1677 - test (or suite) 1 ID:
1683 - test (or suite) n ID:
1690 :param element: Element which will use the filtered data.
1691 :param params: Parameters which will be included in the output. If None,
1692 all parameters are included.
1693 :param data_set: The set of data to be filtered: tests, suites,
1695 :param continue_on_error: Continue if there is error while reading the
1696 data. The Item will be empty then
1697 :type element: pandas.Series
1700 :type continue_on_error: bool
1701 :returns: Filtered data.
1702 :rtype pandas.Series
1705 include = element.get(u"include", None)
1707 logging.warning(u"No tests to include, skipping the element.")
1711 params = element.get(u"parameters", None)
1713 params.append(u"type")
1717 for job, builds in element[u"data"].items():
1718 data[job] = pd.Series()
1719 for build in builds:
1720 data[job][str(build)] = pd.Series()
1721 for test in include:
# Patterns and test IDs are both lower-cased: case-insensitive matching.
1723 reg_ex = re.compile(str(test).lower())
1724 for test_id in self.data[job][
1725 str(build)][data_set].keys():
# NOTE(review): re.match anchors at the start only — patterns match test-ID
# prefixes, not substrings.
1726 if re.match(reg_ex, str(test_id).lower()):
1727 test_data = self.data[job][
1728 str(build)][data_set][test_id]
1729 data[job][str(build)][test_id] = pd.Series()
1731 for param, val in test_data.items():
1732 data[job][str(build)][test_id]\
1735 for param in params:
1737 data[job][str(build)][
# Requested parameter absent from the test => placeholder string.
1741 data[job][str(build)][
1742 test_id][param] = u"No Data"
1743 except KeyError as err:
1744 logging.error(repr(err))
1745 if continue_on_error:
1750 except (KeyError, IndexError, ValueError) as err:
1752 f"Missing mandatory parameter in the element "
1753 f"specification: {repr(err)}"
1756 except AttributeError as err:
1757 logging.error(repr(err))
# NOTE(review): sampled view — the decorator line (presumably @staticmethod,
# since there is no self) and the return statement are missing.
# Flattens the job -> build -> item nesting into one Series keyed by item ID;
# later builds silently overwrite earlier ones on duplicate IDs.
1761 def merge_data(data):
1762 """Merge data from more jobs and builds to a simple data structure.
1764 The output data structure is:
1766 - test (suite) 1 ID:
1772 - test (suite) n ID:
1775 :param data: Data to merge.
1776 :type data: pandas.Series
1777 :returns: Merged data.
1778 :rtype: pandas.Series
1781 logging.info(u" Merging data ...")
1783 merged_data = pd.Series()
# .values iterates the Series' stored objects (jobs, then their builds).
1784 for builds in data.values:
1785 for item in builds.values:
1786 for item_id, item_data in item.items():
1787 merged_data[item_id] = item_data
# NOTE(review): sampled view — missing lines include the full tbl_hdr list,
# the `continue` bodies, the row loop, and the avg accumulation; confirm
# against the full file before editing.
# Dumps every test's "show-run" operational data (per-DUT, per-thread VPP
# runtime counters) to stdout as PrettyTable text.
1791 def print_all_oper_data(self):
1792 """Print all operational data to console.
1800 u"Cycles per Packet",
1801 u"Average Vector Size"
1804 for job in self._input_data.values:
1805 for build in job.values:
1806 for test_id, test_data in build[u"tests"].items():
# Tests without captured show-run data are skipped.
1808 if test_data.get(u"show-run", None) is None:
1810 for dut_name, data in test_data[u"show-run"].items():
1811 if data.get(u"threads", None) is None:
1813 print(f"Host IP: {data.get(u'host', '')}, "
1814 f"Socket: {data.get(u'socket', '')}")
# One table per thread; thread 0 is the VPP main thread, others are workers.
1815 for thread_nr, thread in data[u"threads"].items():
1816 txt_table = prettytable.PrettyTable(tbl_hdr)
1819 txt_table.add_row(row)
# Guard against division by zero for threads with no rows.
1821 if len(thread) == 0:
1824 avg = f", Average Vector Size per Node: " \
1825 f"{(avg / len(thread)):.2f}"
1826 th_name = u"main" if thread_nr == 0 \
1827 else f"worker_{thread_nr}"
1828 print(f"{dut_name}, {th_name}{avg}")
1829 txt_table.float_format = u".2"
1830 txt_table.align = u"r"
1831 txt_table.align[u"Name"] = u"l"
1832 print(f"{txt_table.get_string()}\n")