1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
38 from robot.api import ExecutionResult, ResultVisitor
39 from robot import errors
41 from resources.libraries.python import jumpavg
42 from input_data_files import download_and_unzip_data_file
45 # Separator used in file names
# NOTE(review): this file is a *sampled* extract of the original module. Each
# remaining line keeps its original file line number fused at the start, and
# the jumps in those numbers show that most interior lines (try/except
# headers, returns, closing braces, docstring terminators) are missing.
# Code below is left byte-identical; only review comments are added.
#
# ExecutionChecker walks a Robot Framework result tree (it subclasses
# ResultVisitor) and accumulates a JSON-like dict of metadata, suites and
# per-test results; the class docstring fragments below sketch that schema.
49 class ExecutionChecker(ResultVisitor):
50 """Class to traverse through the test suite structure.
52 The functionality implemented in this class generates a json structure:
58 "generated": "Timestamp",
59 "version": "SUT version",
60 "job": "Jenkins job name",
61 "build": "Information about the build"
64 "Suite long name 1": {
66 "doc": "Suite 1 documentation",
67 "parent": "Suite 1 parent",
68 "level": "Level of the suite in the suite hierarchy"
70 "Suite long name N": {
72 "doc": "Suite N documentation",
73 "parent": "Suite 2 parent",
74 "level": "Level of the suite in the suite hierarchy"
81 "parent": "Name of the parent of the test",
82 "doc": "Test documentation",
83 "msg": "Test message",
84 "conf-history": "DUT1 and DUT2 VAT History",
85 "show-run": "Show Run",
86 "tags": ["tag 1", "tag 2", "tag n"],
88 "status": "PASS" | "FAIL",
134 "parent": "Name of the parent of the test",
135 "doc": "Test documentation",
136 "msg": "Test message",
137 "tags": ["tag 1", "tag 2", "tag n"],
139 "status": "PASS" | "FAIL",
146 "parent": "Name of the parent of the test",
147 "doc": "Test documentation",
148 "msg": "Test message",
149 "tags": ["tag 1", "tag 2", "tag n"],
150 "type": "MRR" | "BMRR",
151 "status": "PASS" | "FAIL",
153 "receive-rate": float,
154 # Average of a list, computed using AvgStdevStats.
155 # In CSIT-1180, replace with List[float].
169 "metadata": {  # Optional
170 "version": "VPP version",
171 "job": "Jenkins job name",
172 "build": "Information about the build"
176 "doc": "Suite 1 documentation",
177 "parent": "Suite 1 parent",
178 "level": "Level of the suite in the suite hierarchy"
181 "doc": "Suite N documentation",
182 "parent": "Suite 2 parent",
183 "level": "Level of the suite in the suite hierarchy"
189 "parent": "Name of the parent of the test",
190 "doc": "Test documentation"
191 "msg": "Test message"
192 "tags": ["tag 1", "tag 2", "tag n"],
193 "conf-history": "DUT1 and DUT2 VAT History"
194 "show-run": "Show Run"
195 "status": "PASS" | "FAIL"
203 .. note:: ID is the lowercase full path to the test.
# Pre-compiled class-level regexes that pull rates, latencies and component
# versions out of free-text Robot test/keyword messages.
# NOTE(review): in patterns such as r'(\d+.\d+)' the '.' is unescaped and
# matches any character -- presumably intended as a literal decimal point;
# confirm against real messages before tightening to r'(\d+\.\d+)'.
206 REGEX_PLR_RATE = re.compile(
207 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
208 r'PLRsearch upper bound::?\s(\d+.\d+)'
210 REGEX_NDRPDR_RATE = re.compile(
211 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
212 r'NDR_UPPER:\s(\d+.\d+).*\n'
213 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
214 r'PDR_UPPER:\s(\d+.\d+)'
216 REGEX_PERF_MSG_INFO = re.compile(
217 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
218 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
219 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
220 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
221 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
223 REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
225 # TODO: Remove when not needed
226 REGEX_NDRPDR_LAT_BASE = re.compile(
227 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
228 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
230 REGEX_NDRPDR_LAT = re.compile(
231 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
232 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
233 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
234 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
235 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
236 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
238 # TODO: Remove when not needed
239 REGEX_NDRPDR_LAT_LONG = re.compile(
240 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
241 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
242 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
246 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
247 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
248 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
250 REGEX_VERSION_VPP = re.compile(
251 r"(return STDOUT Version:\s*|"
252 r"VPP Version:\s*|VPP version:\s*)(.*)"
254 REGEX_VERSION_DPDK = re.compile(
255 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
257 REGEX_TCP = re.compile(
258 r'Total\s(rps|cps|throughput):\s(\d*).*$'
260 REGEX_MRR = re.compile(
261 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
262 r'tx\s(\d*),\srx\s(\d*)'
264 REGEX_BMRR = re.compile(
265 r'Maximum Receive Rate trial results'
266 r' in packets per second: \[(.*)\]'
268 REGEX_RECONF_LOSS = re.compile(
269 r'Packets lost due to reconfig: (\d*)'
271 REGEX_RECONF_TIME = re.compile(
272 r'Implied time lost: (\d*.[\de-]*)'
# Tag/name normalisation helpers: e.g. '2t1c' tags, '-2t1c-' (old) vs
# '-1c-' (new) name fragments, and a leading 'tcNN-' test-case number.
274 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
276 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
278 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
280 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
282 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
# Constructor + `data` property getter. NOTE(review): sampled extract --
# several attribute initialisations (e.g. self._version, self._testbed,
# self._test_id, the self._data and parse_msg dict openers) fall on missing
# lines; do not assume the visible lines are the complete initialiser.
284 def __init__(self, metadata, mapping, ignore):
287 :param metadata: Key-value pairs to be included in "metadata" part of
289 :param mapping: Mapping of the old names of test cases to the new
291 :param ignore: List of TCs to be ignored.
297 # Type of message to parse out from the test messages
298 self._msg_type = None
304 self._timestamp = None
306 # Testbed. The testbed is identified by TG node IP address.
309 # Mapping of TCs long names
310 self._mapping = mapping
313 self._ignore = ignore
315 # Number of PAPI History messages found:
317 # 1 - PAPI History of DUT1
318 # 2 - PAPI History of DUT2
319 self._conf_history_lookup_nr = 0
321 self._sh_run_counter = 0
323 # Test ID of currently processed test- the lowercase full path to the
327 # The main data structure
329 u"metadata": OrderedDict(),
330 u"suites": OrderedDict(),
331 u"tests": OrderedDict()
334 # Save the provided metadata
335 for key, val in metadata.items():
336 self._data[u"metadata"][key] = val
338 # Dictionary defining the methods used to parse different types of
# Dispatch table: _msg_type string -> bound parser method; consulted by
# start_message() (see line 1260 fragment below).
341 u"timestamp": self._get_timestamp,
342 u"vpp-version": self._get_vpp_version,
343 u"dpdk-version": self._get_dpdk_version,
344 # TODO: Remove when not needed:
345 u"teardown-vat-history": self._get_vat_history,
346 u"teardown-papi-history": self._get_papi_history,
347 u"test-show-runtime": self._get_show_run,
348 u"testbed": self._get_testbed
# Read-only accessor for the accumulated parse result (the @property
# decorator and `def data` line are among the missing sampled lines).
353 """Getter - Data parsed from the XML file.
355 :returns: Data parsed from the XML file.
# Message-to-human-readable-summary helpers for MRR and NDRPDR tests.
# NOTE(review): sampled extract -- the `try:` headers, `return msg`
# fall-backs, dict/tuple openers and the final return of the formatted
# string are on missing lines; only the surviving lines are shown.
360 def _get_data_from_mrr_test_msg(self, msg):
361 """Get info from message of MRR performance tests.
363 :param msg: Message to be processed.
365 :returns: Processed message or original message if a problem occurs.
# Expects REGEX_MRR_MSG_INFO to capture exactly one group: the
# comma-separated list of per-trial rates inside [...].
369 groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
370 if not groups or groups.lastindex != 1:
374 data = groups.group(1).split(u", ")
375 except (AttributeError, IndexError, ValueError, KeyError):
# Rescale each rate to millions (1e6) with two decimals.
381 out_str += f"{(float(item) / 1e6):.2f}, "
382 return out_str[:-2] + u"]"
383 except (AttributeError, IndexError, ValueError, KeyError):
386 def _get_data_from_perf_test_msg(self, msg):
387 """Get info from message of NDRPDR performance tests.
389 :param msg: Message to be processed.
391 :returns: Processed message or original message if a problem occurs.
# Expects REGEX_PERF_MSG_INFO to yield exactly 10 groups: NDR/PDR lower
# bounds (+ alternate unit) and 90/50/10% PDR latency string pairs.
395 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
396 if not groups or groups.lastindex != 10:
401 u"ndr_low": float(groups.group(1)),
402 u"ndr_low_b": float(groups.group(2)),
403 u"pdr_low": float(groups.group(3)),
404 u"pdr_low_b": float(groups.group(4)),
405 u"pdr_lat_90_1": groups.group(5),
406 u"pdr_lat_90_2": groups.group(6),
407 u"pdr_lat_50_1": groups.group(7),
408 u"pdr_lat_50_2": groups.group(8),
409 u"pdr_lat_10_1": groups.group(9),
410 u"pdr_lat_10_2": groups.group(10),
412 except (AttributeError, IndexError, ValueError, KeyError):
# Inner helper: turn two 'min/avg/max/hdrh' latency strings (one per
# direction) into a percentile summary via decoded HdrHistograms.
415 def _process_lat(in_str_1, in_str_2):
416 """Extract min, avg, max values from latency string.
418 :param in_str_1: Latency string for one direction produced by robot
420 :param in_str_2: Latency string for second direction produced by
424 :returns: Processed latency string or empty string if a problem
426 :rtype: tuple(str, str)
428 in_list_1 = in_str_1.split('/', 3)
429 in_list_2 = in_str_2.split('/', 3)
431 if len(in_list_1) != 4 and len(in_list_2) != 4:
# Re-pad base64 hdrh payload to a multiple of 4 before decoding.
434 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
436 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
437 except hdrh.codec.HdrLengthException:
440 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
442 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
443 except hdrh.codec.HdrLengthException:
446 if hdr_lat_1 and hdr_lat_2:
447 hdr_lat_1_50 = hdr_lat_1.get_value_at_percentile(50.0)
448 hdr_lat_1_90 = hdr_lat_1.get_value_at_percentile(90.0)
449 hdr_lat_1_99 = hdr_lat_1.get_value_at_percentile(99.0)
450 hdr_lat_2_50 = hdr_lat_2.get_value_at_percentile(50.0)
451 hdr_lat_2_90 = hdr_lat_2.get_value_at_percentile(90.0)
452 hdr_lat_2_99 = hdr_lat_2.get_value_at_percentile(99.0)
# All-zero percentile sums are treated as "no data" (falsy sum).
454 if (hdr_lat_1_50 + hdr_lat_1_90 + hdr_lat_1_99 +
455 hdr_lat_2_50 + hdr_lat_2_90 + hdr_lat_2_99):
457 f"{hdr_lat_1_50} {hdr_lat_1_90} {hdr_lat_1_99} "
458 f"{hdr_lat_2_50} {hdr_lat_2_90} {hdr_lat_2_99}"
464 pdr_lat_10 = _process_lat(data[u'pdr_lat_10_1'],
465 data[u'pdr_lat_10_2'])
466 pdr_lat_50 = _process_lat(data[u'pdr_lat_50_1'],
467 data[u'pdr_lat_50_2'])
468 pdr_lat_90 = _process_lat(data[u'pdr_lat_90_1'],
469 data[u'pdr_lat_90_2'])
470 pdr_lat_10 = f"\n3. {pdr_lat_10}" if pdr_lat_10 else u""
471 pdr_lat_50 = f"\n4. {pdr_lat_50}" if pdr_lat_50 else u""
472 pdr_lat_90 = f"\n5. {pdr_lat_90}" if pdr_lat_90 else u""
475 f"1. {(data[u'ndr_low'] / 1e6):.2f} "
476 f"{data[u'ndr_low_b']:.2f}"
477 f"\n2. {(data[u'pdr_low'] / 1e6):.2f} "
478 f"{data[u'pdr_low_b']:.2f}"
483 except (AttributeError, IndexError, ValueError, KeyError):
# Per-message metadata extractors. Each is invoked through the parse_msg
# dispatch table when self._msg_type matches, writes into
# self._data["metadata"] (or the current test's "conf-history"), and resets
# self._msg_type to None so each value is captured once per lookup.
# NOTE(review): sampled extract -- try/except bodies and some docstring
# terminators are on missing lines.
486 def _get_testbed(self, msg):
487 """Called when extraction of testbed IP is required.
488 The testbed is identified by TG node IP address.
490 :param msg: Message to process.
495 if msg.message.count(u"Setup of TG node") or \
496 msg.message.count(u"Setup of node TG host"):
497 reg_tg_ip = re.compile(
498 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
500 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
501 except (KeyError, ValueError, IndexError, AttributeError):
504 self._data[u"metadata"][u"testbed"] = self._testbed
505 self._msg_type = None
507 def _get_vpp_version(self, msg):
508 """Called when extraction of VPP version is required.
510 :param msg: Message to process.
# Three marker phrases are accepted for backward compatibility with
# older log formats (see REGEX_VERSION_VPP alternatives).
515 if msg.message.count(u"return STDOUT Version:") or \
516 msg.message.count(u"VPP Version:") or \
517 msg.message.count(u"VPP version:"):
518 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
520 self._data[u"metadata"][u"version"] = self._version
521 self._msg_type = None
523 def _get_dpdk_version(self, msg):
524 """Called when extraction of DPDK version is required.
526 :param msg: Message to process.
531 if msg.message.count(u"DPDK Version:"):
533 self._version = str(re.search(
534 self.REGEX_VERSION_DPDK, msg.message).group(2))
535 self._data[u"metadata"][u"version"] = self._version
539 self._msg_type = None
541 def _get_timestamp(self, msg):
542 """Called when extraction of timestamp is required.
544 :param msg: Message to process.
# Keeps only the first 14 characters of the Robot message timestamp.
549 self._timestamp = msg.timestamp[:14]
550 self._data[u"metadata"][u"generated"] = self._timestamp
551 self._msg_type = None
553 def _get_vat_history(self, msg):
554 """Called when extraction of VAT command history is required.
556 TODO: Remove when not needed.
558 :param msg: Message to process.
# _conf_history_lookup_nr counts DUTs seen so far; the first hit also
# initialises the per-test "conf-history" accumulator string.
562 if msg.message.count(u"VAT command history:"):
563 self._conf_history_lookup_nr += 1
564 if self._conf_history_lookup_nr == 1:
565 self._data[u"tests"][self._test_id][u"conf-history"] = str()
567 self._msg_type = None
568 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
569 r"VAT command history:", u"",
570 msg.message, count=1).replace(u'\n', u' |br| ').\
573 self._data[u"tests"][self._test_id][u"conf-history"] += (
574 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
577 def _get_papi_history(self, msg):
578 """Called when extraction of PAPI command history is required.
580 :param msg: Message to process.
# Mirrors _get_vat_history but keys on the PAPI marker phrase.
584 if msg.message.count(u"PAPI command history:"):
585 self._conf_history_lookup_nr += 1
586 if self._conf_history_lookup_nr == 1:
587 self._data[u"tests"][self._test_id][u"conf-history"] = str()
589 self._msg_type = None
590 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
591 r"PAPI command history:", u"",
592 msg.message, count=1).replace(u'\n', u' |br| ').\
594 self._data[u"tests"][self._test_id][u"conf-history"] += (
595 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
# Parses "show runtime" telemetry out of a PAPI/CLI message into per-DUT,
# per-thread node statistics under tests[test_id]["show-run"].
# NOTE(review): sampled extract -- the loop header over `runtime` items,
# the `oper` dict opener and several append fields are on missing lines.
598 def _get_show_run(self, msg):
599 """Called when extraction of VPP operational data (output of CLI command
600 Show Runtime) is required.
602 :param msg: Message to process.
607 if not msg.message.count(u"stats runtime"):
# Only the first show-runtime per test is kept (counter set in
# start_test_kw; >1 means a repeated dump we skip).
611 if self._sh_run_counter > 1:
614 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
615 self._data[u"tests"][self._test_id][u"show-run"] = dict()
617 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
621 host = groups.group(1)
622 except (AttributeError, IndexError):
625 sock = groups.group(2)
626 except (AttributeError, IndexError):
# The message payload is python-repr-ish; normalise quotes/whitespace
# so json.loads can parse everything after the first ':'.
# NOTE(review): fragile text surgery -- confirm against a real message
# before changing any of these replacements.
629 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
630 replace(u"'", u'"').replace(u'b"', u'"').
631 replace(u'u"', u'"').split(u":", 1)[1])
634 threads_nr = len(runtime[0][u"clocks"])
635 except (IndexError, KeyError):
# DUT name is derived from how many DUTs were already recorded.
638 dut = u"DUT{nr}".format(
639 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
644 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
648 for idx in range(threads_nr):
# Clocks-per-unit: prefer vectors, then calls, then suspends as divisor.
649 if item[u"vectors"][idx] > 0:
650 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
651 elif item[u"calls"][idx] > 0:
652 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
653 elif item[u"suspends"][idx] > 0:
654 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
658 if item[u"calls"][idx] > 0:
659 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
# Record only threads with any activity (calls+vectors+suspends != 0).
663 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
664 int(item[u"suspends"][idx]):
665 oper[u"threads"][idx].append([
668 item[u"vectors"][idx],
669 item[u"suspends"][idx],
674 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
# Throughput / latency parsers: each takes the raw test message and returns
# (parsed-dict, "PASS"/"FAIL"); -1.0 defaults mark values that could not be
# parsed. NOTE(review): sampled extract -- `status = ...` assignments, `try:`
# headers and dict openers (e.g. `latency = {`) are on missing lines.
676 def _get_ndrpdr_throughput(self, msg):
677 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
680 :param msg: The test message to be parsed.
682 :returns: Parsed data as a dict and the status (PASS/FAIL).
683 :rtype: tuple(dict, str)
687 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
688 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
691 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
693 if groups is not None:
695 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
696 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
697 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
698 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
700 except (IndexError, ValueError):
703 return throughput, status
705 def _get_plr_throughput(self, msg):
706 """Get PLRsearch lower bound and PLRsearch upper bound from the test
709 :param msg: The test message to be parsed.
711 :returns: Parsed data as a dict and the status (PASS/FAIL).
712 :rtype: tuple(dict, str)
720 groups = re.search(self.REGEX_PLR_RATE, msg)
722 if groups is not None:
724 throughput[u"LOWER"] = float(groups.group(1))
725 throughput[u"UPPER"] = float(groups.group(2))
727 except (IndexError, ValueError):
730 return throughput, status
732 def _get_ndrpdr_latency(self, msg):
733 """Get LATENCY from the test message.
735 :param msg: The test message to be parsed.
737 :returns: Parsed data as a dict and the status (PASS/FAIL).
738 :rtype: tuple(dict, str)
# Default latency dict: NDR/PDR plus PDR90/PDR50/PDR10/LAT0 buckets,
# each with two directions initialised from `latency_default`
# (defined on a missing line).
748 u"direction1": copy.copy(latency_default),
749 u"direction2": copy.copy(latency_default)
752 u"direction1": copy.copy(latency_default),
753 u"direction2": copy.copy(latency_default)
756 u"direction1": copy.copy(latency_default),
757 u"direction2": copy.copy(latency_default)
760 u"direction1": copy.copy(latency_default),
761 u"direction2": copy.copy(latency_default)
764 u"direction1": copy.copy(latency_default),
765 u"direction2": copy.copy(latency_default)
768 u"direction1": copy.copy(latency_default),
769 u"direction2": copy.copy(latency_default)
# Try the three historical message formats from newest to oldest:
# LONG (18 groups), current (12 groups), BASE (4 groups).
773 # TODO: Rewrite when long and base are not needed
774 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
776 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
778 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
780 return latency, u"FAIL"
782 def process_latency(in_str):
783 """Return object with parsed latency values.
785 TODO: Define class for the return type.
787 :param in_str: Input string, min/avg/max/hdrh format.
789 :returns: Dict with corresponding keys, except hdrh float values.
791 :throws IndexError: If in_str does not have enough substrings.
792 :throws ValueError: If a substring does not convert to float.
794 in_list = in_str.split('/', 3)
797 u"min": float(in_list[0]),
798 u"avg": float(in_list[1]),
799 u"max": float(in_list[2]),
# Optional fourth segment is the raw base64 hdrh payload, kept as str.
803 if len(in_list) == 4:
804 rval[u"hdrh"] = str(in_list[3])
# Groups 1-4 are common to every format: NDR/PDR per direction.
809 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
810 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
811 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
812 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
813 if groups.lastindex == 4:
814 return latency, u"PASS"
815 except (IndexError, ValueError):
# 12-group format adds PDR90/PDR50/PDR10 and LAT0 pairs.
819 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
820 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
821 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
822 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
823 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
824 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
825 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
826 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
827 if groups.lastindex == 12:
828 return latency, u"PASS"
829 except (IndexError, ValueError):
# 18-group LONG format additionally carries NDR10/NDR50/NDR90 buckets,
# created lazily here.
832 # TODO: Remove when not needed
833 latency[u"NDR10"] = {
834 u"direction1": copy.copy(latency_default),
835 u"direction2": copy.copy(latency_default)
837 latency[u"NDR50"] = {
838 u"direction1": copy.copy(latency_default),
839 u"direction2": copy.copy(latency_default)
841 latency[u"NDR90"] = {
842 u"direction1": copy.copy(latency_default),
843 u"direction2": copy.copy(latency_default)
846 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
847 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
848 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
849 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
850 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
851 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
852 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
853 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
854 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
855 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
856 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
857 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
858 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
859 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
860 return latency, u"PASS"
861 except (IndexError, ValueError):
864 return latency, u"FAIL"
# Suite/test traversal (ResultVisitor overrides). start_test is the core:
# it classifies each test by its tags (NDRPDR/SOAK/TCP/MRR/RECONF/...) and
# delegates to the _get_* parsers above.
# NOTE(review): sampled extract -- `return` guards, `test_result = ...`
# initialiser, several dict closers and logging lines are missing.
866 def visit_suite(self, suite):
867 """Implements traversing through the suite and its direct children.
869 :param suite: Suite to process.
873 if self.start_suite(suite) is not False:
874 suite.suites.visit(self)
875 suite.tests.visit(self)
876 self.end_suite(suite)
878 def start_suite(self, suite):
879 """Called when suite starts.
881 :param suite: Suite to process.
887 parent_name = suite.parent.name
888 except AttributeError:
# Normalise the suite doc for reST output: single quotes, flattened
# whitespace, '|br|' line-break markers.
891 doc_str = suite.doc.\
892 replace(u'"', u"'").\
893 replace(u'\n', u' ').\
894 replace(u'\r', u'').\
895 replace(u'*[', u' |br| *[').\
896 replace(u"*", u"**").\
897 replace(u' |br| *[', u'*[', 1)
899 self._data[u"suites"][suite.longname.lower().
901 replace(u" ", u"_")] = {
902 u"name": suite.name.lower(),
904 u"parent": parent_name,
905 u"level": len(suite.longname.split(u"."))
908 suite.keywords.visit(self)
910 def end_suite(self, suite):
911 """Called when suite ends.
913 :param suite: Suite to process.
918 def visit_test(self, test):
919 """Implements traversing through the test.
921 :param test: Test to process.
925 if self.start_test(test) is not False:
926 test.keywords.visit(self)
929 def start_test(self, test):
930 """Called when test starts.
932 :param test: Test to process.
# Reset per-test show-run counter (incremented in start_test_kw).
937 self._sh_run_counter = 0
939 longname_orig = test.longname.lower()
941 # Check the ignore list
942 if longname_orig in self._ignore:
945 tags = [str(tag) for tag in test.tags]
948 # Change the TC long name and name if defined in the mapping table
949 longname = self._mapping.get(longname_orig, None)
950 if longname is not None:
951 name = longname.split(u'.')[-1]
953 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
957 longname = longname_orig
958 name = test.name.lower()
960 # Remove TC number from the TC long name (backward compatibility):
961 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
962 # Remove TC number from the TC name (not needed):
963 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
965 test_result[u"parent"] = test.parent.name.lower()
966 test_result[u"tags"] = tags
967 test_result["doc"] = test.doc.\
968 replace(u'"', u"'").\
969 replace(u'\n', u' ').\
970 replace(u'\r', u'').\
971 replace(u'[', u' |br| [').\
972 replace(u' |br| [', u'[', 1)
973 test_result[u"msg"] = test.message.\
974 replace(u'\n', u' |br| ').\
975 replace(u'\r', u'').\
977 test_result[u"type"] = u"FUNC"
978 test_result[u"status"] = test.status
980 if u"PERFTEST" in tags:
981 # Replace info about cores (e.g. -1c-) with the info about threads
982 # and cores (e.g. -1t1c-) in the long test case names and in the
983 # test case names if necessary.
984 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
# Exactly one NtMc tag is expected; otherwise the test is failed below.
988 for tag in test_result[u"tags"]:
989 groups = re.search(self.REGEX_TC_TAG, tag)
995 self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
996 f"-{tag_tc.lower()}-",
999 test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
1000 f"-{tag_tc.lower()}-",
1001 test_result["name"],
1004 test_result[u"status"] = u"FAIL"
1005 self._data[u"tests"][self._test_id] = test_result
1007 f"The test {self._test_id} has no or more than one "
1008 f"multi-threading tags.\n"
1009 f"Tags: {test_result[u'tags']}"
# Tag-driven dispatch to the appropriate result parser; any parser
# can downgrade test_result["status"] to FAIL.
1013 if test.status == u"PASS":
1014 if u"NDRPDR" in tags:
1015 test_result[u"msg"] = self._get_data_from_perf_test_msg(
1017 replace(u'\n', u' |br| '). \
1018 replace(u'\r', u''). \
1020 test_result[u"type"] = u"NDRPDR"
1021 test_result[u"throughput"], test_result[u"status"] = \
1022 self._get_ndrpdr_throughput(test.message)
1023 test_result[u"latency"], test_result[u"status"] = \
1024 self._get_ndrpdr_latency(test.message)
1025 elif u"SOAK" in tags:
1026 test_result[u"type"] = u"SOAK"
1027 test_result[u"throughput"], test_result[u"status"] = \
1028 self._get_plr_throughput(test.message)
1029 elif u"TCP" in tags:
1030 test_result[u"type"] = u"TCP"
1031 groups = re.search(self.REGEX_TCP, test.message)
1032 test_result[u"result"] = int(groups.group(2))
1033 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1034 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1036 replace(u'\n', u' |br| '). \
1037 replace(u'\r', u''). \
1040 test_result[u"type"] = u"MRR"
1042 test_result[u"type"] = u"BMRR"
1044 test_result[u"result"] = dict()
1045 groups = re.search(self.REGEX_BMRR, test.message)
1046 if groups is not None:
1047 items_str = groups.group(1)
1048 items_float = [float(item.strip()) for item
1049 in items_str.split(",")]
1050 # Use whole list in CSIT-1180.
1051 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1052 test_result[u"result"][u"receive-rate"] = stats.avg
# Legacy MRR format fallback: rx pkts / duration.
1054 groups = re.search(self.REGEX_MRR, test.message)
1055 test_result[u"result"][u"receive-rate"] = \
1056 float(groups.group(3)) / float(groups.group(1))
1057 elif u"RECONF" in tags:
1058 test_result[u"type"] = u"RECONF"
1059 test_result[u"result"] = None
1061 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1062 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1063 test_result[u"result"] = {
1064 u"loss": int(grps_loss.group(1)),
1065 u"time": float(grps_time.group(1))
1067 except (AttributeError, IndexError, ValueError, TypeError):
1068 test_result[u"status"] = u"FAIL"
1069 elif u"DEVICETEST" in tags:
1070 test_result[u"type"] = u"DEVICETEST"
1072 test_result[u"status"] = u"FAIL"
1073 self._data[u"tests"][self._test_id] = test_result
1076 self._data[u"tests"][self._test_id] = test_result
# Keyword/message traversal: start_keyword routes setup/teardown/test
# keywords to the visit_*_kw walkers; start_*_kw methods set self._msg_type
# based on keyword names so start_message can dispatch via parse_msg.
# NOTE(review): sampled extract -- `pass` bodies, `elif keyword.type ==
# u"test"` style guards and docstring terminators are on missing lines.
1078 def end_test(self, test):
1079 """Called when test ends.
1081 :param test: Test to process.
1086 def visit_keyword(self, keyword):
1087 """Implements traversing through the keyword and its child keywords.
1089 :param keyword: Keyword to process.
1090 :type keyword: Keyword
1093 if self.start_keyword(keyword) is not False:
1094 self.end_keyword(keyword)
1096 def start_keyword(self, keyword):
1097 """Called when keyword starts. Default implementation does nothing.
1099 :param keyword: Keyword to process.
1100 :type keyword: Keyword
1104 if keyword.type == u"setup":
1105 self.visit_setup_kw(keyword)
1106 elif keyword.type == u"teardown":
1107 self.visit_teardown_kw(keyword)
1109 self.visit_test_kw(keyword)
1110 except AttributeError:
1113 def end_keyword(self, keyword):
1114 """Called when keyword ends. Default implementation does nothing.
1116 :param keyword: Keyword to process.
1117 :type keyword: Keyword
1121 def visit_test_kw(self, test_kw):
1122 """Implements traversing through the test keyword and its child
1125 :param test_kw: Keyword to process.
1126 :type test_kw: Keyword
1129 for keyword in test_kw.keywords:
1130 if self.start_test_kw(keyword) is not False:
1131 self.visit_test_kw(keyword)
1132 self.end_test_kw(keyword)
1134 def start_test_kw(self, test_kw):
1135 """Called when test keyword starts. Default implementation does
1138 :param test_kw: Keyword to process.
1139 :type test_kw: Keyword
# Keyword-name matching selects which parser start_message will run;
# the show-run counter caps repeated dumps (see _get_show_run).
1142 if test_kw.name.count(u"Show Runtime On All Duts") or \
1143 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1144 self._msg_type = u"test-show-runtime"
1145 self._sh_run_counter += 1
1146 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1147 self._msg_type = u"dpdk-version"
1150 test_kw.messages.visit(self)
1152 def end_test_kw(self, test_kw):
1153 """Called when keyword ends. Default implementation does nothing.
1155 :param test_kw: Keyword to process.
1156 :type test_kw: Keyword
1160 def visit_setup_kw(self, setup_kw):
1161 """Implements traversing through the teardown keyword and its child
1164 :param setup_kw: Keyword to process.
1165 :type setup_kw: Keyword
1168 for keyword in setup_kw.keywords:
1169 if self.start_setup_kw(keyword) is not False:
1170 self.visit_setup_kw(keyword)
1171 self.end_setup_kw(keyword)
1173 def start_setup_kw(self, setup_kw):
1174 """Called when teardown keyword starts. Default implementation does
1177 :param setup_kw: Keyword to process.
1178 :type setup_kw: Keyword
# `not self._version` / `not self._timestamp` / `not self._testbed`
# guards ensure each value is captured only once per build.
1181 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1182 and not self._version:
1183 self._msg_type = u"vpp-version"
1184 elif setup_kw.name.count(u"Set Global Variable") \
1185 and not self._timestamp:
1186 self._msg_type = u"timestamp"
1187 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1188 self._msg_type = u"testbed"
1191 setup_kw.messages.visit(self)
1193 def end_setup_kw(self, setup_kw):
1194 """Called when keyword ends. Default implementation does nothing.
1196 :param setup_kw: Keyword to process.
1197 :type setup_kw: Keyword
1201 def visit_teardown_kw(self, teardown_kw):
1202 """Implements traversing through the teardown keyword and its child
1205 :param teardown_kw: Keyword to process.
1206 :type teardown_kw: Keyword
1209 for keyword in teardown_kw.keywords:
1210 if self.start_teardown_kw(keyword) is not False:
1211 self.visit_teardown_kw(keyword)
1212 self.end_teardown_kw(keyword)
1214 def start_teardown_kw(self, teardown_kw):
1215 """Called when teardown keyword starts
1217 :param teardown_kw: Keyword to process.
1218 :type teardown_kw: Keyword
1222 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1223 # TODO: Remove when not needed:
1224 self._conf_history_lookup_nr = 0
1225 self._msg_type = u"teardown-vat-history"
1226 teardown_kw.messages.visit(self)
1227 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1228 self._conf_history_lookup_nr = 0
1229 self._msg_type = u"teardown-papi-history"
1230 teardown_kw.messages.visit(self)
1232 def end_teardown_kw(self, teardown_kw):
1233 """Called when keyword ends. Default implementation does nothing.
1235 :param teardown_kw: Keyword to process.
1236 :type teardown_kw: Keyword
1240 def visit_message(self, msg):
1241 """Implements visiting the message.
1243 :param msg: Message to process.
1247 if self.start_message(msg) is not False:
1248 self.end_message(msg)
1250 def start_message(self, msg):
1251 """Called when message starts. Get required information from messages:
1254 :param msg: Message to process.
# Dispatch through the parse_msg table built in __init__ (the
# `if self._msg_type:` guard sits on a missing line).
1260 self.parse_msg[self._msg_type](msg)
1262 def end_message(self, msg):
1263 """Called when message ends. Default implementation does nothing.
1265 :param msg: Message to process.
1274 The data is extracted from output.xml files generated by Jenkins jobs and
1275 stored in pandas' DataFrames.
1281 (as described in ExecutionChecker documentation)
1283 (as described in ExecutionChecker documentation)
1285 (as described in ExecutionChecker documentation)
# InputData accessor methods (the `class InputData:` header and its
# docstring live on lines outside/missing from this extract). Parsed builds
# are held in a pandas Series keyed job -> build -> {metadata,suites,tests}.
# NOTE(review): sampled extract -- `self._cfg = spec` style assignments,
# `@property` decorators and `return` lines are partially missing.
1288 def __init__(self, spec):
1291 :param spec: Specification.
1292 :type spec: Specification
1299 self._input_data = pd.Series()
1303 """Getter - Input data.
1305 :returns: Input data
1306 :rtype: pandas.Series
1308 return self._input_data
1310 def metadata(self, job, build):
1311 """Getter - metadata
1313 :param job: Job which metadata we want.
1314 :param build: Build which metadata we want.
1318 :rtype: pandas.Series
1321 return self.data[job][build][u"metadata"]
1323 def suites(self, job, build):
1326 :param job: Job which suites we want.
1327 :param build: Build which suites we want.
1331 :rtype: pandas.Series
# NOTE(review): suites() coerces build with str() but metadata()/tests()
# index with the raw value -- presumably an inconsistency; confirm what
# type callers pass before unifying.
1334 return self.data[job][str(build)][u"suites"]
1336 def tests(self, job, build):
1339 :param job: Job which tests we want.
1340 :param build: Build which tests we want.
1344 :rtype: pandas.Series
1347 return self.data[job][build][u"tests"]
1349 def _parse_tests(self, job, build, log):
1350 """Process data from robot output.xml file and return JSON structured
1353 :param job: The name of job which build output data will be processed.
1354 :param build: The build which output data will be processed.
1355 :param log: List of log messages.
1358 :type log: list of tuples (severity, msg)
1359 :returns: JSON data structure.
# Parse the downloaded output.xml with Robot's ExecutionResult and feed
# it through an ExecutionChecker visitor; DataError is logged, not raised.
1368 with open(build[u"file-name"], u'r') as data_file:
1370 result = ExecutionResult(data_file)
1371 except errors.DataError as err:
1373 (u"ERROR", f"Error occurred while parsing output.xml: "
1377 checker = ExecutionChecker(metadata, self._cfg.mapping,
1379 result.visit(checker)
# NOTE(review): partial, line-number-prefixed extract of
# InputData._download_and_parse_build(); missing statements are not
# reconstructed here -- only review comments are added.
1383 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1384 """Download and parse the input data file.
1386 :param pid: PID of the process executing this method.
1387 :param job: Name of the Jenkins job which generated the processed input
1389 :param build: Information about the Jenkins build which generated the
1390 processed input file.
1391 :param repeat: Repeat the download specified number of times if not
1402 (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
# Download (with retries) and unzip the input file for this job/build.
1410 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1418 f"It is not possible to download the input data file from the "
1419 f"job {job}, build {build[u'build']}, or it is damaged. "
1425 f"    Processing data from the build {build[u'build']} ...")
# Parse the downloaded output.xml into the JSON-like structure.
1427 data = self._parse_tests(job, build, logs)
1431 f"Input data file from the job {job}, build "
1432 f"{build[u'build']} is damaged. Skipped.")
1435 state = u"processed"
# Best effort: the local file is no longer needed once parsed.
1438 remove(build[u"file-name"])
1439 except OSError as err:
1441 ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1445 # If the time-period is defined in the specification file, remove all
1446 # files which are outside the time period.
1447 timeperiod = self._cfg.input.get(u"time-period", None)
1448 if timeperiod and data:
1450 timeperiod = timedelta(int(timeperiod))
1451 metadata = data.get(u"metadata", None)
1453 generated = metadata.get(u"generated", None)
# NOTE(review): naive datetime comparison -- assumes `generated` and `now`
# share a timezone convention; confirm against the producer of output.xml.
1455 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1456 if (now - generated) > timeperiod:
1457 # Remove the data and the file:
1462 f"    The build {job}/{build[u'build']} is "
1463 f"outdated, will be removed.")
1465 logs.append((u"INFO", u"  Done."))
# Flush the accumulated (severity, message) log tuples into the real logger.
1467 for level, line in logs:
1468 if level == u"INFO":
1470 elif level == u"ERROR":
1472 elif level == u"DEBUG":
1474 elif level == u"CRITICAL":
1475 logging.critical(line)
1476 elif level == u"WARNING":
1477 logging.warning(line)
1479 return {u"data": data, u"state": state, u"job": job, u"build": build}
# NOTE(review): partial, line-number-prefixed extract of
# InputData.download_and_parse_data(); only review comments are added.
1481 def download_and_parse_data(self, repeat=1):
1482 """Download the input data files, parse input data from input files and
1483 store in pandas' Series.
1485 :param repeat: Repeat the download specified number of times if not
1490 logging.info(u"Downloading and parsing input files ...")
1492 for job, builds in self._cfg.builds.items():
1493 for build in builds:
1495 result = self._download_and_parse_build(job, build, repeat)
1496 build_nr = result[u"build"][u"build"]
1499 data = result[u"data"]
# Re-pack the parsed dicts into pandas Series, one per data category.
1500 build_data = pd.Series({
1501 u"metadata": pd.Series(
1502 list(data[u"metadata"].values()),
1503 index=list(data[u"metadata"].keys())
1505 u"suites": pd.Series(
1506 list(data[u"suites"].values()),
1507 index=list(data[u"suites"].keys())
1509 u"tests": pd.Series(
1510 list(data[u"tests"].values()),
1511 index=list(data[u"tests"].keys())
# Store keyed as self._input_data[job][str(build_nr)].
1515 if self._input_data.get(job, None) is None:
1516 self._input_data[job] = pd.Series()
1517 self._input_data[job][str(build_nr)] = build_data
1519 self._cfg.set_input_file_name(
1520 job, build_nr, result[u"build"][u"file-name"])
1522 self._cfg.set_input_state(job, build_nr, result[u"state"])
# ru_maxrss is in kilobytes on Linux; /1000 gives approximate megabytes.
1525 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1526 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1528 logging.info(u"Done.")
@staticmethod
def _end_of_tag(tag_filter, start=0, closer=u"'"):
    """Return the index of character in the string which is the end of tag.

    NOTE(review): reconstructed from a mangled extract. ``@staticmethod``
    is grounded by the ``InputData._end_of_tag(...)`` call in
    ``_condition`` and the absence of ``self``; the ``ValueError -> None``
    fallback is grounded by the caller needing a termination signal when
    no further tag is present.

    :param tag_filter: The string where the end of tag is being searched.
    :param start: The index where the searching is stated.
    :param closer: The character which is the tag closer.
    :type tag_filter: str
    :type start: int
    :type closer: str
    :returns: The index of the tag closer, or None if no tag remains.
    :rtype: int
    """
    try:
        idx_opener = tag_filter.index(closer, start)
        return tag_filter.index(closer, idx_opener + 1)
    except ValueError:
        # No (more) quoted tags in the rest of the string.
        return None
@staticmethod
def _condition(tag_filter):
    """Create a conditional statement from the given tag filter.

    Every quoted tag name ``'TAG'`` in the filter is rewritten to
    ``'TAG' in tags`` so the whole expression can be evaluated against a
    set of tags.

    NOTE(review): reconstructed from a mangled extract; the loop
    initialization and termination (``index is None``) are not in the
    surviving lines -- confirm against the original file.

    :param tag_filter: Filter based on tags from the element specification.
    :type tag_filter: str
    :returns: Conditional statement which can be evaluated.
    :rtype: str
    """
    index = 0
    while True:
        index = InputData._end_of_tag(tag_filter, index)
        if index is None:
            return tag_filter
        index += 1
        tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
# NOTE(review): partial, line-number-prefixed extract of
# InputData.filter_data(); only review comments are added.
1568 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1569 continue_on_error=False):
1570 """Filter required data from the given jobs and builds.
1572 The output data structure is:
1576 - test (or suite) 1 ID:
1582 - test (or suite) n ID:
1589 :param element: Element which will use the filtered data.
1590 :param params: Parameters which will be included in the output. If None,
1591 all parameters are included.
1592 :param data: If not None, this data is used instead of data specified
1594 :param data_set: The set of data to be filtered: tests, suites,
1596 :param continue_on_error: Continue if there is error while reading the
1597 data. The Item will be empty then
1598 :type element: pandas.Series
1602 :type continue_on_error: bool
1603 :returns: Filtered data.
1604 :rtype pandas.Series
1608 if data_set == "suites":
1610 elif element[u"filter"] in (u"all", u"template"):
# The tag filter string is compiled into an eval()-able condition.
1613 cond = InputData._condition(element[u"filter"])
1614 logging.debug(f"   Filter: {cond}")
1616 logging.error(u"   No filter defined.")
1620 params = element.get(u"parameters", None)
1622 params.append(u"type")
# NOTE(review): `data` is re-purposed below as the output container; the
# re-initialization line is missing from this extract -- confirm.
1624 data_to_filter = data if data else element[u"data"]
1627 for job, builds in data_to_filter.items():
1628 data[job] = pd.Series()
1629 for build in builds:
1630 data[job][str(build)] = pd.Series()
1633 self.data[job][str(build)][data_set].items())
1635 if continue_on_error:
1639 for test_id, test_data in data_dict.items():
# Security note: eval() on the filter expression -- the filter comes from
# the specification file, which is trusted input; flagging for review.
1640 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1641 data[job][str(build)][test_id] = pd.Series()
1643 for param, val in test_data.items():
1644 data[job][str(build)][test_id][param] = val
1646 for param in params:
1648 data[job][str(build)][test_id][param] =\
1651 data[job][str(build)][test_id][param] =\
1655 except (KeyError, IndexError, ValueError) as err:
1657 f"Missing mandatory parameter in the element specification: "
1661 except AttributeError as err:
1662 logging.error(repr(err))
# SyntaxError here means the tag filter itself was malformed for eval().
1664 except SyntaxError as err:
1666 f"The filter {cond} is not correct. Check if all tags are "
1667 f"enclosed by apostrophes.\n{repr(err)}"
# NOTE(review): partial, line-number-prefixed extract of
# InputData.filter_tests_by_name(); only review comments are added.
1671 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1672 continue_on_error=False):
1673 """Filter required data from the given jobs and builds.
1675 The output data structure is:
1679 - test (or suite) 1 ID:
1685 - test (or suite) n ID:
1692 :param element: Element which will use the filtered data.
1693 :param params: Parameters which will be included in the output. If None,
1694 all parameters are included.
1695 :param data_set: The set of data to be filtered: tests, suites,
1697 :param continue_on_error: Continue if there is error while reading the
1698 data. The Item will be empty then
1699 :type element: pandas.Series
1702 :type continue_on_error: bool
1703 :returns: Filtered data.
1704 :rtype pandas.Series
1707 include = element.get(u"include", None)
1709 logging.warning(u"No tests to include, skipping the element.")
1713 params = element.get(u"parameters", None)
1715 params.append(u"type")
1719 for job, builds in element[u"data"].items():
1720 data[job] = pd.Series()
1721 for build in builds:
1722 data[job][str(build)] = pd.Series()
1723 for test in include:
# Each `include` entry is treated as a case-insensitive regex matched
# against the start of each test ID (re.match anchors at position 0).
1725 reg_ex = re.compile(str(test).lower())
1726 for test_id in self.data[job][
1727 str(build)][data_set].keys():
1728 if re.match(reg_ex, str(test_id).lower()):
1729 test_data = self.data[job][
1730 str(build)][data_set][test_id]
1731 data[job][str(build)][test_id] = pd.Series()
1733 for param, val in test_data.items():
1734 data[job][str(build)][test_id]\
1737 for param in params:
1739 data[job][str(build)][
# Parameters missing from the test data are filled with a placeholder.
1743 data[job][str(build)][
1744 test_id][param] = u"No Data"
1745 except KeyError as err:
1746 logging.error(repr(err))
1747 if continue_on_error:
1752 except (KeyError, IndexError, ValueError) as err:
1754 f"Missing mandatory parameter in the element "
1755 f"specification: {repr(err)}"
1758 except AttributeError as err:
1759 logging.error(repr(err))
@staticmethod
def merge_data(data):
    """Merge data from more jobs and builds to a simple data structure.

    The output data structure is:
    - test (suite) 1 ID:
      - param 1
      ...
      - param n
    ...
    - test (suite) n ID:
      ...

    Later jobs/builds overwrite earlier ones on duplicate item IDs.

    NOTE(review): reconstructed from a mangled extract; ``@staticmethod``
    (no ``self`` in the signature) and the final ``return`` (promised by
    the docstring) are not in the surviving lines -- confirm.

    :param data: Data to merge.
    :type data: pandas.Series
    :returns: Merged data.
    :rtype: pandas.Series
    """
    logging.info(u"    Merging data ...")

    merged_data = pd.Series()
    for builds in data.values:
        for item in builds.values:
            for item_id, item_data in item.items():
                merged_data[item_id] = item_data
    return merged_data
# NOTE(review): partial, line-number-prefixed extract of
# InputData.print_all_oper_data(); only review comments are added.
1793 def print_all_oper_data(self):
1794 """Print all operational data to console.
1802 u"Cycles per Packet",
1803 u"Average Vector Size"
# Walk every job -> build -> test and print each test's "show-run" data.
1806 for job in self._input_data.values:
1807 for build in job.values:
1808 for test_id, test_data in build[u"tests"].items():
1810 if test_data.get(u"show-run", None) is None:
1812 for dut_name, data in test_data[u"show-run"].items():
1813 if data.get(u"threads", None) is None:
1815 print(f"Host IP: {data.get(u'host', '')}, "
1816 f"Socket: {data.get(u'socket', '')}")
# One PrettyTable per VPP thread, formatted below before printing.
1817 for thread_nr, thread in data[u"threads"].items():
1818 txt_table = prettytable.PrettyTable(tbl_hdr)
1821 txt_table.add_row(row)
1823 if len(thread) == 0:
1826 avg = f", Average Vector Size per Node: " \
1827 f"{(avg / len(thread)):.2f}"
# Thread 0 is VPP's main thread; others are workers.
1828 th_name = u"main" if thread_nr == 0 \
1829 else f"worker_{thread_nr}"
1830 print(f"{dut_name}, {th_name}{avg}")
1831 txt_table.float_format = u".2"
1832 txt_table.align = u"r"
1833 txt_table.align[u"Name"] = u"l"
1834 print(f"{txt_table.get_string()}\n")