1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
38 from robot.api import ExecutionResult, ResultVisitor
39 from robot import errors
41 from resources.libraries.python import jumpavg
42 from input_data_files import download_and_unzip_data_file
45 # Separator used in file names
49 class ExecutionChecker(ResultVisitor):
50 """Class to traverse through the test suite structure.
52 The functionality implemented in this class generates a json structure:
58 "generated": "Timestamp",
59 "version": "SUT version",
60 "job": "Jenkins job name",
61 "build": "Information about the build"
64 "Suite long name 1": {
66 "doc": "Suite 1 documentation",
67 "parent": "Suite 1 parent",
68 "level": "Level of the suite in the suite hierarchy"
70 "Suite long name N": {
72 "doc": "Suite N documentation",
73 "parent": "Suite 2 parent",
74 "level": "Level of the suite in the suite hierarchy"
81 "parent": "Name of the parent of the test",
82 "doc": "Test documentation",
83 "msg": "Test message",
84 "conf-history": "DUT1 and DUT2 VAT History",
85 "show-run": "Show Run",
86 "tags": ["tag 1", "tag 2", "tag n"],
88 "status": "PASS" | "FAIL",
134 "parent": "Name of the parent of the test",
135 "doc": "Test documentation",
136 "msg": "Test message",
137 "tags": ["tag 1", "tag 2", "tag n"],
139 "status": "PASS" | "FAIL",
146 "parent": "Name of the parent of the test",
147 "doc": "Test documentation",
148 "msg": "Test message",
149 "tags": ["tag 1", "tag 2", "tag n"],
150 "type": "MRR" | "BMRR",
151 "status": "PASS" | "FAIL",
153 "receive-rate": float,
154 # Average of a list, computed using AvgStdevStats.
155 # In CSIT-1180, replace with List[float].
169 "metadata": { # Optional
170 "version": "VPP version",
171 "job": "Jenkins job name",
172 "build": "Information about the build"
176 "doc": "Suite 1 documentation",
177 "parent": "Suite 1 parent",
178 "level": "Level of the suite in the suite hierarchy"
181 "doc": "Suite N documentation",
182 "parent": "Suite 2 parent",
183 "level": "Level of the suite in the suite hierarchy"
189 "parent": "Name of the parent of the test",
190 "doc": "Test documentation"
191 "msg": "Test message"
192 "tags": ["tag 1", "tag 2", "tag n"],
193 "conf-history": "DUT1 and DUT2 VAT History"
194 "show-run": "Show Run"
195 "status": "PASS" | "FAIL"
203 .. note:: ID is the lowercase full path to the test.
206 REGEX_PLR_RATE = re.compile(
207 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
208 r'PLRsearch upper bound::?\s(\d+.\d+)'
210 REGEX_NDRPDR_RATE = re.compile(
211 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
212 r'NDR_UPPER:\s(\d+.\d+).*\n'
213 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
214 r'PDR_UPPER:\s(\d+.\d+)'
216 REGEX_PERF_MSG_INFO = re.compile(
217 r'NDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
218 r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
219 r'NDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
220 r'PDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
221 r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
222 r'PDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*)'
224 # TODO: Remove when not needed
225 REGEX_NDRPDR_LAT_BASE = re.compile(
226 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
227 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
229 REGEX_NDRPDR_LAT = re.compile(
230 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
231 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
232 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
233 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
234 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
235 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
237 # TODO: Remove when not needed
238 REGEX_NDRPDR_LAT_LONG = re.compile(
239 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
240 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
241 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
242 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
246 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
247 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
249 REGEX_VERSION_VPP = re.compile(
250 r"(return STDOUT Version:\s*|"
251 r"VPP Version:\s*|VPP version:\s*)(.*)"
253 REGEX_VERSION_DPDK = re.compile(
254 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
256 REGEX_TCP = re.compile(
257 r'Total\s(rps|cps|throughput):\s(\d*).*$'
259 REGEX_MRR = re.compile(
260 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
261 r'tx\s(\d*),\srx\s(\d*)'
263 REGEX_BMRR = re.compile(
264 r'Maximum Receive Rate trial results'
265 r' in packets per second: \[(.*)\]'
267 REGEX_RECONF_LOSS = re.compile(
268 r'Packets lost due to reconfig: (\d*)'
270 REGEX_RECONF_TIME = re.compile(
271 r'Implied time lost: (\d*.[\de-]*)'
273 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
275 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
277 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
279 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
281 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
283 def __init__(self, metadata, mapping, ignore):
# NOTE(review): this dump is sparse — interior lines are missing throughout;
# comments below describe only what the visible lines establish.
286 :param metadata: Key-value pairs to be included in "metadata" part of
288 :param mapping: Mapping of the old names of test cases to the new
290 :param ignore: List of TCs to be ignored.
296 # Type of message to parse out from the test messages
297 self._msg_type = None
303 self._timestamp = None
305 # Testbed. The testbed is identified by TG node IP address.
308 # Mapping of TCs long names
309 self._mapping = mapping
312 self._ignore = ignore
314 # Number of PAPI History messages found:
316 # 1 - PAPI History of DUT1
317 # 2 - PAPI History of DUT2
318 self._conf_history_lookup_nr = 0
# Counter of "Show Runtime" keyword occurrences within the current test.
320 self._sh_run_counter = 0
322 # Test ID of currently processed test- the lowercase full path to the
326 # The main data structure
# Top-level result structure: metadata / suites / tests, insertion-ordered.
328 u"metadata": OrderedDict(),
329 u"suites": OrderedDict(),
330 u"tests": OrderedDict()
333 # Save the provided metadata
334 for key, val in metadata.items():
335 self._data[u"metadata"][key] = val
337 # Dictionary defining the methods used to parse different types of
# Dispatch table: self._msg_type value -> parser method invoked from
# start_message() (see parse_msg usage further below).
340 u"timestamp": self._get_timestamp,
341 u"vpp-version": self._get_vpp_version,
342 u"dpdk-version": self._get_dpdk_version,
343 # TODO: Remove when not needed:
344 u"teardown-vat-history": self._get_vat_history,
345 u"teardown-papi-history": self._get_papi_history,
346 u"test-show-runtime": self._get_show_run,
347 u"testbed": self._get_testbed
352 """Getter - Data parsed from the XML file.
354 :returns: Data parsed from the XML file.
359 def _get_data_from_perf_test_msg(self, msg):
# Parses an NDRPDR performance-test message into a compact human-readable
# summary (throughput + latency percentiles). Falls back to the original
# message on any parsing problem (several except clauses below).
367 from message of NDRPDR performance tests.
369 :param msg: Message to be processed.
371 :returns: Processed message or original message if a problem occurs.
# REGEX_PERF_MSG_INFO must capture exactly 20 groups; anything else is
# treated as "not an NDRPDR message".
375 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
376 if not groups or groups.lastindex != 20:
# Groups 1-20 map positionally onto NDR/PDR lower/upper rate+bandwidth
# values (floats) plus their unit strings and raw latency strings.
381 u"ndr_low": float(groups.group(1)),
382 u"ndr_low_unit": groups.group(2),
383 u"ndr_low_b": float(groups.group(3)),
384 u"ndr_low_b_unit": groups.group(4),
385 u"ndr_lat_1": groups.group(5),
386 u"ndr_lat_2": groups.group(6),
387 u"ndr_up": float(groups.group(7)),
388 u"ndr_up_unit": groups.group(8),
389 u"ndr_up_b": float(groups.group(9)),
390 u"ndr_up_b_unit": groups.group(10),
391 u"pdr_low": float(groups.group(11)),
392 u"pdr_low_unit": groups.group(12),
393 u"pdr_low_b": float(groups.group(13)),
394 u"pdr_low_b_unit": groups.group(14),
395 u"pdr_lat_1": groups.group(15),
396 u"pdr_lat_2": groups.group(16),
397 u"pdr_up": float(groups.group(17)),
398 u"pdr_up_unit": groups.group(18),
399 u"pdr_up_b": float(groups.group(19)),
400 u"pdr_up_b_unit": groups.group(20)
402 except (AttributeError, IndexError, ValueError, KeyError):
405 def _process_lat(in_str_1, in_str_2):
# Nested helper: turns two "min/avg/max[/hdrh]" latency strings (one per
# direction) into display strings; the optional 4th field is a
# base64-encoded HDR histogram decoded via hdrh.
406 """Extract min, avg, max values from latency string.
408 :param in_str_1: Latency string for one direction produced by robot
410 :param in_str_2: Latency string for second direction produced by
414 :returns: Processed latency string or original string if a problem
416 :rtype: tuple(str, str)
418 in_list_1 = in_str_1.split('/', 3)
419 if len(in_list_1) < 3:
420 return u"Not Measured.", u"Not Measured."
422 in_list_2 = in_str_2.split('/', 3)
423 if len(in_list_2) < 3:
424 return u"Not Measured.", u"Not Measured."
427 if len(in_list_1) == 4:
# Re-pad base64 before decoding; padding may have been stripped.
# NOTE(review): "len % 4" padding looks suspect vs the usual
# "(-len) % 4" — TODO confirm against the original file.
428 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
430 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
431 except hdrh.codec.HdrLengthException:
434 if len(in_list_2) == 4:
435 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
437 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
438 except hdrh.codec.HdrLengthException:
441 hdr_lat = u"Not Measured."
442 if hdr_lat_1 and hdr_lat_2:
# Percentile summary (50/90/99/99.9) for both directions from the
# decoded HDR histograms.
444 f"50%/90%/99%/99.9%, "
445 f"{hdr_lat_1.get_value_at_percentile(50.0)}/"
446 f"{hdr_lat_1.get_value_at_percentile(90.0)}/"
447 f"{hdr_lat_1.get_value_at_percentile(99.0)}/"
448 f"{hdr_lat_1.get_value_at_percentile(99.9)}, "
449 f"{hdr_lat_2.get_value_at_percentile(50.0)}/"
450 f"{hdr_lat_2.get_value_at_percentile(90.0)}/"
451 f"{hdr_lat_2.get_value_at_percentile(99.0)}/"
452 f"{hdr_lat_2.get_value_at_percentile(99.9)} "
458 f"{in_list_1[0]}/{in_list_1[1]}/{in_list_1[2]}, "
459 f"{in_list_2[0]}/{in_list_2[1]}/{in_list_2[2]} uSec.",
# Build the final summary string from parsed values; rates are shown
# in millions of the reported unit (value / 1e6, "M" prefix).
464 pdr_lat = _process_lat(data[u'pdr_lat_1'], data[u'pdr_lat_2'])
465 ndr_lat = _process_lat(data[u'ndr_lat_1'], data[u'ndr_lat_2'])
467 f"NDR Throughput: {(data[u'ndr_low'] / 1e6):.2f} "
468 f"M{data[u'ndr_low_unit']}, "
469 f"{data[u'ndr_low_b']:.2f} {data[u'ndr_low_b_unit']}.\n"
470 f"One-Way Latency at NDR: {ndr_lat[0]}\n"
471 f"One-Way Latency at NDR by percentiles: {ndr_lat[1]}\n"
472 f"PDR Throughput: {(data[u'pdr_low'] / 1e6):.2f} "
473 f"M{data[u'pdr_low_unit']}, "
474 f"{data[u'pdr_low_b']:.2f} {data[u'pdr_low_b_unit']}.\n"
475 f"One-Way Latency at PDR: {pdr_lat[0]}\n"
476 f"One-Way Latency at PDR by percentiles: {pdr_lat[1]}"
478 except (AttributeError, IndexError, ValueError, KeyError):
481 def _get_testbed(self, msg):
482 """Called when extraction of testbed IP is required.
483 The testbed is identified by TG node IP address.
485 :param msg: Message to process.
# Only act on messages announcing TG node setup; extract the first
# dotted-quad after "TG" (note: '.' in the pattern is unescaped, so it
# also matches any character — tolerated here, narrow input).
490 if msg.message.count(u"Setup of TG node") or \
491 msg.message.count(u"Setup of node TG host"):
492 reg_tg_ip = re.compile(
493 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
495 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
496 except (KeyError, ValueError, IndexError, AttributeError):
# Record the testbed in metadata and stop looking for this message type.
499 self._data[u"metadata"][u"testbed"] = self._testbed
500 self._msg_type = None
502 def _get_vpp_version(self, msg):
503 """Called when extraction of VPP version is required.
505 :param msg: Message to process.
# Match any of the three known version-banner variants, then pull the
# version text via REGEX_VERSION_VPP (second capture group — the line
# continuing with .group(2) is missing from this dump).
510 if msg.message.count(u"return STDOUT Version:") or \
511 msg.message.count(u"VPP Version:") or \
512 msg.message.count(u"VPP version:"):
513 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
# Store in metadata and clear the message type — version found once.
515 self._data[u"metadata"][u"version"] = self._version
516 self._msg_type = None
518 def _get_dpdk_version(self, msg):
519 """Called when extraction of DPDK version is required.
521 :param msg: Message to process.
# Only the capitalised "DPDK Version:" banner is checked here, although
# REGEX_VERSION_DPDK accepts both capitalisations.
526 if msg.message.count(u"DPDK Version:"):
528 self._version = str(re.search(
529 self.REGEX_VERSION_DPDK, msg.message).group(2))
530 self._data[u"metadata"][u"version"] = self._version
534 self._msg_type = None
536 def _get_timestamp(self, msg):
537 """Called when extraction of timestamp is required.
539 :param msg: Message to process.
# Keep only the first 14 characters of the robot message timestamp
# (date + hours:minutes), store as "generated" metadata.
544 self._timestamp = msg.timestamp[:14]
545 self._data[u"metadata"][u"generated"] = self._timestamp
546 self._msg_type = None
548 def _get_vat_history(self, msg):
549 """Called when extraction of VAT command history is required.
551 TODO: Remove when not needed.
553 :param msg: Message to process.
# One message arrives per DUT; the counter distinguishes DUT1/DUT2 and
# the first occurrence initialises the per-test "conf-history" string.
557 if msg.message.count(u"VAT command history:"):
558 self._conf_history_lookup_nr += 1
559 if self._conf_history_lookup_nr == 1:
560 self._data[u"tests"][self._test_id][u"conf-history"] = str()
562 self._msg_type = None
# Strip the "<ip> VAT command history:" prefix and convert newlines to
# the " |br| " marker used by the report generator.
563 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
564 r"VAT command history:", u"",
565 msg.message, count=1).replace(u'\n', u' |br| ').\
568 self._data[u"tests"][self._test_id][u"conf-history"] += (
569 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
572 def _get_papi_history(self, msg):
573 """Called when extraction of PAPI command history is required.
575 :param msg: Message to process.
# Mirrors _get_vat_history, but for "PAPI command history:" messages.
579 if msg.message.count(u"PAPI command history:"):
580 self._conf_history_lookup_nr += 1
581 if self._conf_history_lookup_nr == 1:
582 self._data[u"tests"][self._test_id][u"conf-history"] = str()
584 self._msg_type = None
585 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
586 r"PAPI command history:", u"",
587 msg.message, count=1).replace(u'\n', u' |br| ').\
589 self._data[u"tests"][self._test_id][u"conf-history"] += (
590 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
593 def _get_show_run(self, msg):
594 """Called when extraction of VPP operational data (output of CLI command
595 Show Runtime) is required.
597 :param msg: Message to process.
# Ignore anything that is not a "stats runtime" message; also only the
# first show-runtime invocation per test is processed (counter > 1 skips).
602 if not msg.message.count(u"stats runtime"):
606 if self._sh_run_counter > 1:
609 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
610 self._data[u"tests"][self._test_id][u"show-run"] = dict()
# Host IP and PAPI socket are parsed from the "(ip - sock)" suffix;
# each may independently be absent.
612 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
616 host = groups.group(1)
617 except (AttributeError, IndexError):
620 sock = groups.group(2)
621 except (AttributeError, IndexError):
# The runtime payload is a Python-repr-like string; normalise quoting
# and whitespace so json.loads can parse the part after the first ":".
624 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
625 replace(u"'", u'"').replace(u'b"', u'"').
626 replace(u'u"', u'"').split(u":", 1)[1])
629 threads_nr = len(runtime[0][u"clocks"])
630 except (IndexError, KeyError):
# DUT key is derived from how many DUTs were already recorded (DUT1, DUT2, ...).
633 dut = u"DUT{nr}".format(
634 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
639 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
# Per-thread stats: clocks are normalised per vector, else per call,
# else per suspend (first non-zero denominator wins).
643 for idx in range(threads_nr):
644 if item[u"vectors"][idx] > 0:
645 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
646 elif item[u"calls"][idx] > 0:
647 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
648 elif item[u"suspends"][idx] > 0:
649 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
653 if item[u"calls"][idx] > 0:
654 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
# Record the node only when it did any work (calls+vectors+suspends != 0).
658 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
659 int(item[u"suspends"][idx]):
660 oper[u"threads"][idx].append([
663 item[u"vectors"][idx],
664 item[u"suspends"][idx],
669 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
671 def _get_ndrpdr_throughput(self, msg):
672 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
675 :param msg: The test message to be parsed.
677 :returns: Parsed data as a dict and the status (PASS/FAIL).
678 :rtype: tuple(dict, str)
# Defaults of -1.0 signal "not parsed"; status presumably starts FAIL
# and flips to PASS when all four groups convert (lines missing from dump).
682 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
683 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
686 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
688 if groups is not None:
690 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
691 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
692 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
693 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
695 except (IndexError, ValueError):
698 return throughput, status
700 def _get_plr_throughput(self, msg):
701 """Get PLRsearch lower bound and PLRsearch upper bound from the test
704 :param msg: The test message to be parsed.
706 :returns: Parsed data as a dict and the status (PASS/FAIL).
707 :rtype: tuple(dict, str)
# Same pattern as _get_ndrpdr_throughput, but a flat LOWER/UPPER dict
# matched by REGEX_PLR_RATE.
715 groups = re.search(self.REGEX_PLR_RATE, msg)
717 if groups is not None:
719 throughput[u"LOWER"] = float(groups.group(1))
720 throughput[u"UPPER"] = float(groups.group(2))
722 except (IndexError, ValueError):
725 return throughput, status
727 def _get_ndrpdr_latency(self, msg):
728 """Get LATENCY from the test message.
730 :param msg: The test message to be parsed.
732 :returns: Parsed data as a dict and the status (PASS/FAIL).
733 :rtype: tuple(dict, str)
# Three message formats are supported, tried in order: LONG (18 latency
# pairs), current (12), BASE (4). Each latency entry holds two
# directions initialised from a shared latency_default template.
743 u"direction1": copy.copy(latency_default),
744 u"direction2": copy.copy(latency_default)
747 u"direction1": copy.copy(latency_default),
748 u"direction2": copy.copy(latency_default)
751 u"direction1": copy.copy(latency_default),
752 u"direction2": copy.copy(latency_default)
755 u"direction1": copy.copy(latency_default),
756 u"direction2": copy.copy(latency_default)
759 u"direction1": copy.copy(latency_default),
760 u"direction2": copy.copy(latency_default)
763 u"direction1": copy.copy(latency_default),
764 u"direction2": copy.copy(latency_default)
768 # TODO: Rewrite when long and base are not needed
769 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
771 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
773 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
775 return latency, u"FAIL"
777 def process_latency(in_str):
778 """Return object with parsed latency values.
780 TODO: Define class for the return type.
782 :param in_str: Input string, min/avg/max/hdrh format.
784 :returns: Dict with corresponding keys, except hdrh float values.
786 :throws IndexError: If in_str does not have enough substrings.
787 :throws ValueError: If a substring does not convert to float.
# Split on at most 3 slashes: min/avg/max are floats, optional 4th
# field is kept verbatim as the "hdrh" histogram string.
789 in_list = in_str.split('/', 3)
792 u"min": float(in_list[0]),
793 u"avg": float(in_list[1]),
794 u"max": float(in_list[2]),
798 if len(in_list) == 4:
799 rval[u"hdrh"] = str(in_list[3])
# Base format: 4 groups -> NDR/PDR direction1/direction2 only.
804 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
805 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
806 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
807 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
808 if groups.lastindex == 4:
809 return latency, u"PASS"
810 except (IndexError, ValueError):
# Current format: groups 5-12 add PDR90/PDR50/PDR10 and LAT0 pairs.
814 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
815 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
816 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
817 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
818 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
819 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
820 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
821 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
822 if groups.lastindex == 12:
823 return latency, u"PASS"
824 except (IndexError, ValueError):
827 # TODO: Remove when not needed
# Long (legacy) format: additionally populates NDR10/NDR50/NDR90 with a
# different group-to-key mapping (groups 5-18 below).
828 latency[u"NDR10"] = {
829 u"direction1": copy.copy(latency_default),
830 u"direction2": copy.copy(latency_default)
832 latency[u"NDR50"] = {
833 u"direction1": copy.copy(latency_default),
834 u"direction2": copy.copy(latency_default)
836 latency[u"NDR90"] = {
837 u"direction1": copy.copy(latency_default),
838 u"direction2": copy.copy(latency_default)
841 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
842 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
843 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
844 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
845 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
846 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
847 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
848 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
849 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
850 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
851 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
852 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
853 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
854 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
855 return latency, u"PASS"
856 except (IndexError, ValueError):
859 return latency, u"FAIL"
861 def visit_suite(self, suite):
862 """Implements traversing through the suite and its direct children.
864 :param suite: Suite to process.
# Standard ResultVisitor pattern: start hook gates recursion into
# sub-suites and tests, end hook always follows.
868 if self.start_suite(suite) is not False:
869 suite.suites.visit(self)
870 suite.tests.visit(self)
871 self.end_suite(suite)
873 def start_suite(self, suite):
874 """Called when suite starts.
876 :param suite: Suite to process.
# Root suites have no parent name (AttributeError path).
882 parent_name = suite.parent.name
883 except AttributeError:
# Normalise the suite documentation for the report: single quotes,
# no newlines, "|br|" markers and bold markup.
886 doc_str = suite.doc.\
887 replace(u'"', u"'").\
888 replace(u'\n', u' ').\
889 replace(u'\r', u'').\
890 replace(u'*[', u' |br| *[').\
891 replace(u"*", u"**").\
892 replace(u' |br| *[', u'*[', 1)
# Suites are keyed by lowercase long name with spaces -> underscores;
# "level" is the dot-depth of the long name.
894 self._data[u"suites"][suite.longname.lower().
896 replace(u" ", u"_")] = {
897 u"name": suite.name.lower(),
899 u"parent": parent_name,
900 u"level": len(suite.longname.split(u"."))
903 suite.keywords.visit(self)
905 def end_suite(self, suite):
906 """Called when suite ends.
908 :param suite: Suite to process.
913 def visit_test(self, test):
914 """Implements traversing through the test.
916 :param test: Test to process.
# Only descend into the test's keywords when start_test did not veto it.
920 if self.start_test(test) is not False:
921 test.keywords.visit(self)
924 def start_test(self, test):
925 """Called when test starts.
927 :param test: Test to process.
# Reset the per-test Show Runtime counter.
932 self._sh_run_counter = 0
934 longname_orig = test.longname.lower()
936 # Check the ignore list
937 if longname_orig in self._ignore:
940 tags = [str(tag) for tag in test.tags]
943 # Change the TC long name and name if defined in the mapping table
944 longname = self._mapping.get(longname_orig, None)
945 if longname is not None:
946 name = longname.split(u'.')[-1]
948 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
952 longname = longname_orig
953 name = test.name.lower()
955 # Remove TC number from the TC long name (backward compatibility):
956 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
957 # Remove TC number from the TC name (not needed):
958 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
# Basic per-test fields: parent suite, tags, normalised doc and message.
960 test_result[u"parent"] = test.parent.name.lower()
961 test_result[u"tags"] = tags
962 test_result["doc"] = test.doc.\
963 replace(u'"', u"'").\
964 replace(u'\n', u' ').\
965 replace(u'\r', u'').\
966 replace(u'[', u' |br| [').\
967 replace(u' |br| [', u'[', 1)
968 test_result[u"msg"] = self._get_data_from_perf_test_msg(test.message).\
969 replace(u'\n', u' |br| ').\
970 replace(u'\r', u'').\
# Default classification; refined below based on tags.
972 test_result[u"type"] = u"FUNC"
973 test_result[u"status"] = test.status
975 if u"PERFTEST" in tags:
976 # Replace info about cores (e.g. -1c-) with the info about threads
977 # and cores (e.g. -1t1c-) in the long test case names and in the
978 # test case names if necessary.
979 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
983 for tag in test_result[u"tags"]:
984 groups = re.search(self.REGEX_TC_TAG, tag)
# Exactly one NtMc tag is expected; it is spliced into both the
# test id and the displayed name.
990 self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
991 f"-{tag_tc.lower()}-",
994 test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
995 f"-{tag_tc.lower()}-",
# Zero or multiple thread/core tags -> the test is marked FAIL.
999 test_result[u"status"] = u"FAIL"
1000 self._data[u"tests"][self._test_id] = test_result
1002 f"The test {self._test_id} has no or more than one "
1003 f"multi-threading tags.\n"
1004 f"Tags: {test_result[u'tags']}"
# Type-specific parsing only for tests that PASSed in robot.
1008 if test.status == u"PASS":
1009 if u"NDRPDR" in tags:
1010 test_result[u"type"] = u"NDRPDR"
1011 test_result[u"throughput"], test_result[u"status"] = \
1012 self._get_ndrpdr_throughput(test.message)
1013 test_result[u"latency"], test_result[u"status"] = \
1014 self._get_ndrpdr_latency(test.message)
1015 elif u"SOAK" in tags:
1016 test_result[u"type"] = u"SOAK"
1017 test_result[u"throughput"], test_result[u"status"] = \
1018 self._get_plr_throughput(test.message)
1019 elif u"TCP" in tags:
1020 test_result[u"type"] = u"TCP"
1021 groups = re.search(self.REGEX_TCP, test.message)
1022 test_result[u"result"] = int(groups.group(2))
1023 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1025 test_result[u"type"] = u"MRR"
1027 test_result[u"type"] = u"BMRR"
# BMRR: average of trial samples via jumpavg AvgStdevStats;
# MRR fallback: rx / duration from REGEX_MRR.
1029 test_result[u"result"] = dict()
1030 groups = re.search(self.REGEX_BMRR, test.message)
1031 if groups is not None:
1032 items_str = groups.group(1)
1033 items_float = [float(item.strip()) for item
1034 in items_str.split(",")]
1035 # Use whole list in CSIT-1180.
1036 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1037 test_result[u"result"][u"receive-rate"] = stats.avg
1039 groups = re.search(self.REGEX_MRR, test.message)
1040 test_result[u"result"][u"receive-rate"] = \
1041 float(groups.group(3)) / float(groups.group(1))
1042 elif u"RECONF" in tags:
1043 test_result[u"type"] = u"RECONF"
1044 test_result[u"result"] = None
1046 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1047 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1048 test_result[u"result"] = {
1049 u"loss": int(grps_loss.group(1)),
1050 u"time": float(grps_time.group(1))
1052 except (AttributeError, IndexError, ValueError, TypeError):
1053 test_result[u"status"] = u"FAIL"
1055 test_result[u"status"] = u"FAIL"
1056 self._data[u"tests"][self._test_id] = test_result
1059 self._data[u"tests"][self._test_id] = test_result
1061 def end_test(self, test):
1062 """Called when test ends.
1064 :param test: Test to process.
1069 def visit_keyword(self, keyword):
1070 """Implements traversing through the keyword and its child keywords.
1072 :param keyword: Keyword to process.
1073 :type keyword: Keyword
1076 if self.start_keyword(keyword) is not False:
1077 self.end_keyword(keyword)
1079 def start_keyword(self, keyword):
1080 """Called when keyword starts. Default implementation does nothing.
1082 :param keyword: Keyword to process.
1083 :type keyword: Keyword
# Route by keyword type: setup/teardown get dedicated visitors, other
# keywords are treated as test keywords; AttributeError is tolerated.
1087 if keyword.type == u"setup":
1088 self.visit_setup_kw(keyword)
1089 elif keyword.type == u"teardown":
1090 self.visit_teardown_kw(keyword)
1092 self.visit_test_kw(keyword)
1093 except AttributeError:
1096 def end_keyword(self, keyword):
1097 """Called when keyword ends. Default implementation does nothing.
1099 :param keyword: Keyword to process.
1100 :type keyword: Keyword
1104 def visit_test_kw(self, test_kw):
1105 """Implements traversing through the test keyword and its child
1108 :param test_kw: Keyword to process.
1109 :type test_kw: Keyword
# Recursive descent through nested test keywords.
1112 for keyword in test_kw.keywords:
1113 if self.start_test_kw(keyword) is not False:
1114 self.visit_test_kw(keyword)
1115 self.end_test_kw(keyword)
1117 def start_test_kw(self, test_kw):
1118 """Called when test keyword starts. Default implementation does
1121 :param test_kw: Keyword to process.
1122 :type test_kw: Keyword
# Select the message type to parse based on the keyword name; the
# Show Runtime counter tracks repeated invocations within one test.
1125 if test_kw.name.count(u"Show Runtime On All Duts") or \
1126 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1127 self._msg_type = u"test-show-runtime"
1128 self._sh_run_counter += 1
1129 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1130 self._msg_type = u"dpdk-version"
1133 test_kw.messages.visit(self)
1135 def end_test_kw(self, test_kw):
1136 """Called when keyword ends. Default implementation does nothing.
1138 :param test_kw: Keyword to process.
1139 :type test_kw: Keyword
1143 def visit_setup_kw(self, setup_kw):
1144 """Implements traversing through the teardown keyword and its child
1147 :param setup_kw: Keyword to process.
1148 :type setup_kw: Keyword
1151 for keyword in setup_kw.keywords:
1152 if self.start_setup_kw(keyword) is not False:
1153 self.visit_setup_kw(keyword)
1154 self.end_setup_kw(keyword)
1156 def start_setup_kw(self, setup_kw):
1157 """Called when teardown keyword starts. Default implementation does
1160 :param setup_kw: Keyword to process.
1161 :type setup_kw: Keyword
# Each one-shot extraction (version / timestamp / testbed) is armed only
# while its value is still unset.
1164 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1165 and not self._version:
1166 self._msg_type = u"vpp-version"
1167 elif setup_kw.name.count(u"Set Global Variable") \
1168 and not self._timestamp:
1169 self._msg_type = u"timestamp"
1170 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1171 self._msg_type = u"testbed"
1174 setup_kw.messages.visit(self)
1176 def end_setup_kw(self, setup_kw):
1177 """Called when keyword ends. Default implementation does nothing.
1179 :param setup_kw: Keyword to process.
1180 :type setup_kw: Keyword
1184 def visit_teardown_kw(self, teardown_kw):
1185 """Implements traversing through the teardown keyword and its child
1188 :param teardown_kw: Keyword to process.
1189 :type teardown_kw: Keyword
1192 for keyword in teardown_kw.keywords:
1193 if self.start_teardown_kw(keyword) is not False:
1194 self.visit_teardown_kw(keyword)
1195 self.end_teardown_kw(keyword)
1197 def start_teardown_kw(self, teardown_kw):
1198 """Called when teardown keyword starts
1200 :param teardown_kw: Keyword to process.
1201 :type teardown_kw: Keyword
# VAT vs PAPI history keywords reset the DUT counter and then visit the
# messages immediately so the matching parser runs.
1205 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1206 # TODO: Remove when not needed:
1207 self._conf_history_lookup_nr = 0
1208 self._msg_type = u"teardown-vat-history"
1209 teardown_kw.messages.visit(self)
1210 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1211 self._conf_history_lookup_nr = 0
1212 self._msg_type = u"teardown-papi-history"
1213 teardown_kw.messages.visit(self)
1215 def end_teardown_kw(self, teardown_kw):
1216 """Called when keyword ends. Default implementation does nothing.
1218 :param teardown_kw: Keyword to process.
1219 :type teardown_kw: Keyword
1223 def visit_message(self, msg):
1224 """Implements visiting the message.
1226 :param msg: Message to process.
1230 if self.start_message(msg) is not False:
1231 self.end_message(msg)
1233 def start_message(self, msg):
1234 """Called when message starts. Get required information from messages:
1237 :param msg: Message to process.
# Dispatch through the parse_msg table keyed by the current _msg_type
# (set by the start_*_kw hooks above).
1243 self.parse_msg[self._msg_type](msg)
1245 def end_message(self, msg):
1246 """Called when message ends. Default implementation does nothing.
1248 :param msg: Message to process.
1257 The data is extracted from output.xml files generated by Jenkins jobs and
1258 stored in pandas' DataFrames.
1264 (as described in ExecutionChecker documentation)
1266 (as described in ExecutionChecker documentation)
1268 (as described in ExecutionChecker documentation)
1271 def __init__(self, spec):
1274 :param spec: Specification.
1275 :type spec: Specification
# Parsed data accumulates in a pandas Series keyed by job/build.
1282 self._input_data = pd.Series()
1286 """Getter - Input data.
1288 :returns: Input data
1289 :rtype: pandas.Series
1291 return self._input_data
1293 def metadata(self, job, build):
1294 """Getter - metadata
1296 :param job: Job which metadata we want.
1297 :param build: Build which metadata we want.
1301 :rtype: pandas.Series
1304 return self.data[job][build][u"metadata"]
1306 def suites(self, job, build):
1309 :param job: Job which suites we want.
1310 :param build: Build which suites we want.
1314 :rtype: pandas.Series
# NOTE(review): build is stringified here but not in metadata()/tests()
# — looks inconsistent; TODO confirm expected key type of build.
1317 return self.data[job][str(build)][u"suites"]
1319 def tests(self, job, build):
1322 :param job: Job which tests we want.
1323 :param build: Build which tests we want.
1327 :rtype: pandas.Series
1330 return self.data[job][build][u"tests"]
1332 def _parse_tests(self, job, build, log):
1333 """Process data from robot output.xml file and return JSON structured
1336 :param job: The name of job which build output data will be processed.
1337 :param build: The build which output data will be processed.
1338 :param log: List of log messages.
1341 :type log: list of tuples (severity, msg)
1342 :returns: JSON data structure.
# Parse the downloaded output.xml with robot's ExecutionResult; parsing
# failures are logged (and presumably abort this build — line missing).
1351 with open(build[u"file-name"], u'r') as data_file:
1353 result = ExecutionResult(data_file)
1354 except errors.DataError as err:
1356 (u"ERROR", f"Error occurred while parsing output.xml: "
# Walk the parsed result with ExecutionChecker to build the data dict.
1360 checker = ExecutionChecker(metadata, self._cfg.mapping,
1362 result.visit(checker)
1366 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1367 """Download and parse the input data file.
1369 :param pid: PID of the process executing this method.
1370 :param job: Name of the Jenkins job which generated the processed input
1372 :param build: Information about the Jenkins build which generated the
1373 processed input file.
1374 :param repeat: Repeat the download specified number of times if not
# NOTE(review): gapped fragment — the `logs` initialisation and the `state`
# bookkeeping for the failure branches are not visible here.
1385 (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
# Fetch (and unzip) the output.xml for this job/build.
1393 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1401 f"It is not possible to download the input data file from the "
1402 f"job {job}, build {build[u'build']}, or it is damaged. "
1408 f"  Processing data from the build {build[u'build']} ...")
1410 data = self._parse_tests(job, build, logs)
1414 f"Input data file from the job {job}, build "
1415 f"{build[u'build']} is damaged. Skipped.")
1418 state = u"processed"
# Best-effort removal of the downloaded file; a failure is only logged.
1421 remove(build[u"file-name"])
1422 except OSError as err:
1424 ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1428 # If the time-period is defined in the specification file, remove all
1429 # files which are outside the time period.
1430 timeperiod = self._cfg.input.get(u"time-period", None)
1431 if timeperiod and data:
1433 timeperiod = timedelta(int(timeperiod))
1434 metadata = data.get(u"metadata", None)
1436 generated = metadata.get(u"generated", None)
# The timestamp format must match what the parser writes into metadata.
1438 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1439 if (now - generated) > timeperiod:
1440 # Remove the data and the file:
1445 f"  The build {job}/{build[u'build']} is "
1446 f"outdated, will be removed.")
1448 logs.append((u"INFO", u"  Done."))
# Replay the collected (severity, message) tuples into the logging module.
1450 for level, line in logs:
1451 if level == u"INFO":
1453 elif level == u"ERROR":
1455 elif level == u"DEBUG":
1457 elif level == u"CRITICAL":
1458 logging.critical(line)
1459 elif level == u"WARNING":
1460 logging.warning(line)
1462 return {u"data": data, u"state": state, u"job": job, u"build": build}
1464 def download_and_parse_data(self, repeat=1):
1465 """Download the input data files, parse input data from input files and
1466 store in pandas' Series.
1468 :param repeat: Repeat the download specified number of times if not
1473 logging.info(u"Downloading and parsing input files ...")
# NOTE(review): gapped fragment — the guards between these lines (e.g. the
# check that result[u"data"] is usable) are not visible here.
1475 for job, builds in self._cfg.builds.items():
1476 for build in builds:
1478 result = self._download_and_parse_build(job, build, repeat)
1479 build_nr = result[u"build"][u"build"]
1482 data = result[u"data"]
# Re-pack the parsed dicts as pandas Series keyed by their original keys.
1483 build_data = pd.Series({
1484 u"metadata": pd.Series(
1485 list(data[u"metadata"].values()),
1486 index=list(data[u"metadata"].keys())
1488 u"suites": pd.Series(
1489 list(data[u"suites"].values()),
1490 index=list(data[u"suites"].keys())
1492 u"tests": pd.Series(
1493 list(data[u"tests"].values()),
1494 index=list(data[u"tests"].keys())
# Builds are stored under str(build_nr) — the accessors must use the same
# stringified key.
1498 if self._input_data.get(job, None) is None:
1499 self._input_data[job] = pd.Series()
1500 self._input_data[job][str(build_nr)] = build_data
1502 self._cfg.set_input_file_name(
1503 job, build_nr, result[u"build"][u"file-name"])
1505 self._cfg.set_input_state(job, build_nr, result[u"state"])
# ru_maxrss is kilobytes on Linux, so /1000 approximates MB —
# TODO(review): confirm the target platform.
1508 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1509 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1511 logging.info(u"Done.")
1514 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1515 """Return the index of character in the string which is the end of tag.
1517 :param tag_filter: The string where the end of tag is being searched.
1518 :param start: The index where the searching is stated.
1519 :param closer: The character which is the tag closer.
1520 :type tag_filter: str
1523 :returns: The index of the tag closer.
1528 idx_opener = tag_filter.index(closer, start)
1529 return tag_filter.index(closer, idx_opener + 1)
1534 def _condition(tag_filter):
1535 """Create a conditional statement from the given tag filter.
1537 :param tag_filter: Filter based on tags from the element specification.
1538 :type tag_filter: str
1539 :returns: Conditional statement which can be evaluated.
# Rewrite each quoted 'tag' literal into "'tag' in tags" so the filter
# string can later be eval()-ed against a test's tag list.
1545 index = InputData._end_of_tag(tag_filter, index)
# NOTE(review): the loop header and the termination check on `index` are
# not visible in this fragment — the insertion below runs once per tag.
1549 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1551 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1552 continue_on_error=False):
1553 """Filter required data from the given jobs and builds.
1555 The output data structure is:
1559 - test (or suite) 1 ID:
1565 - test (or suite) n ID:
1572 :param element: Element which will use the filtered data.
1573 :param params: Parameters which will be included in the output. If None,
1574 all parameters are included.
1575 :param data: If not None, this data is used instead of data specified
1577 :param data_set: The set of data to be filtered: tests, suites,
1579 :param continue_on_error: Continue if there is error while reading the
1580 data. The Item will be empty then
1581 :type element: pandas.Series
1585 :type continue_on_error: bool
1586 :returns: Filtered data.
1587 :rtype pandas.Series
# Build the eval()-able condition from the element's tag filter; suites and
# the "all"/"template" filters take dedicated branches (only partly visible
# in this fragment).
1591 if data_set == "suites":
1593 elif element[u"filter"] in (u"all", u"template"):
1596 cond = InputData._condition(element[u"filter"])
1597 logging.debug(f"  Filter: {cond}")
1599 logging.error(u"  No filter defined.")
1603 params = element.get(u"parameters", None)
1605 params.append(u"type")
1607 data_to_filter = data if data else element[u"data"]
# NOTE(review): `data` is rebound as the output container below even though
# it is also an input parameter — confirm against the full file.
1610 for job, builds in data_to_filter.items():
1611 data[job] = pd.Series()
1612 for build in builds:
1613 data[job][str(build)] = pd.Series()
1616 self.data[job][str(build)][data_set].items())
1618 if continue_on_error:
# NOTE(review): eval() executes the generated tag condition; the filter
# comes from the specification file (trusted input) — never feed it
# user-supplied strings.
1622 for test_id, test_data in data_dict.items():
1623 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1624 data[job][str(build)][test_id] = pd.Series()
1626 for param, val in test_data.items():
1627 data[job][str(build)][test_id][param] = val
1629 for param in params:
1631 data[job][str(build)][test_id][param] =\
1634 data[job][str(build)][test_id][param] =\
1638 except (KeyError, IndexError, ValueError) as err:
1640 f"Missing mandatory parameter in the element specification: "
1644 except AttributeError as err:
1645 logging.error(repr(err))
1647 except SyntaxError as err:
1649 f"The filter {cond} is not correct. Check if all tags are "
1650 f"enclosed by apostrophes.\n{repr(err)}"
1654 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1655 continue_on_error=False):
1656 """Filter required data from the given jobs and builds.
1658 The output data structure is:
1662 - test (or suite) 1 ID:
1668 - test (or suite) n ID:
1675 :param element: Element which will use the filtered data.
1676 :param params: Parameters which will be included in the output. If None,
1677 all parameters are included.
1678 :param data_set: The set of data to be filtered: tests, suites,
1680 :param continue_on_error: Continue if there is error while reading the
1681 data. The Item will be empty then
1682 :type element: pandas.Series
1685 :type continue_on_error: bool
1686 :returns: Filtered data.
1687 :rtype pandas.Series
# Nothing to do when the element lists no test-name patterns.
1690 include = element.get(u"include", None)
1692 logging.warning(u"No tests to include, skipping the element.")
1696 params = element.get(u"parameters", None)
1698 params.append(u"type")
# NOTE(review): gapped fragment — the output `data` initialisation and
# several guards between these lines are not visible here.
1702 for job, builds in element[u"data"].items():
1703 data[job] = pd.Series()
1704 for build in builds:
1705 data[job][str(build)] = pd.Series()
1706 for test in include:
# Each "include" entry is a case-insensitive regex matched against the
# test ID from the start of the string (re.match).
1708 reg_ex = re.compile(str(test).lower())
1709 for test_id in self.data[job][
1710 str(build)][data_set].keys():
1711 if re.match(reg_ex, str(test_id).lower()):
1712 test_data = self.data[job][
1713 str(build)][data_set][test_id]
1714 data[job][str(build)][test_id] = pd.Series()
1716 for param, val in test_data.items():
1717 data[job][str(build)][test_id]\
1720 for param in params:
1722 data[job][str(build)][
# Missing parameters are represented by a "No Data" placeholder.
1726 data[job][str(build)][
1727 test_id][param] = u"No Data"
1728 except KeyError as err:
1729 logging.error(repr(err))
1730 if continue_on_error:
1735 except (KeyError, IndexError, ValueError) as err:
1737 f"Missing mandatory parameter in the element "
1738 f"specification: {repr(err)}"
1741 except AttributeError as err:
1742 logging.error(repr(err))
1746 def merge_data(data):
1747 """Merge data from more jobs and builds to a simple data structure.
1749 The output data structure is:
1751 - test (suite) 1 ID:
1757 - test (suite) n ID:
1760 :param data: Data to merge.
1761 :type data: pandas.Series
1762 :returns: Merged data.
1763 :rtype: pandas.Series
1766 logging.info(u"  Merging data ...")
1768 merged_data = pd.Series()
# Flatten job -> build -> item into a single Series; when the same item ID
# occurs in several builds, the later one overwrites the earlier.
1769 for builds in data.values:
1770 for item in builds.values:
1771 for item_id, item_data in item.items():
1772 merged_data[item_id] = item_data
# NOTE(review): the `return merged_data` statement (and a @staticmethod
# decorator, if any) fall outside this fragment — confirm in the full file.
1776 def print_all_oper_data(self):
1777 """Print all operational data to console.
# NOTE(review): gapped fragment — the table header list `tbl_hdr`, the row
# construction and the `avg` accumulation are only partially visible here.
1785 u"Cycles per Packet",
1786 u"Average Vector Size"
1789 for job in self._input_data.values:
1790 for build in job.values:
1791 for test_id, test_data in build[u"tests"].items():
# Only tests that captured "show-run" telemetry are printed.
1793 if test_data.get(u"show-run", None) is None:
1795 for dut_name, data in test_data[u"show-run"].items():
1796 if data.get(u"threads", None) is None:
1798 print(f"Host IP: {data.get(u'host', '')}, "
1799 f"Socket: {data.get(u'socket', '')}")
# One table is printed per thread; thread 0 is labelled "main" below.
1800 for thread_nr, thread in data[u"threads"].items():
1801 txt_table = prettytable.PrettyTable(tbl_hdr)
1804 txt_table.add_row(row)
# Guard against division by zero for threads with no rows.
1806 if len(thread) == 0:
1809 avg = f", Average Vector Size per Node: " \
1810 f"{(avg / len(thread)):.2f}"
1811 th_name = u"main" if thread_nr == 0 \
1812 else f"worker_{thread_nr}"
1813 print(f"{dut_name}, {th_name}{avg}")
1814 txt_table.float_format = u".2"
1815 txt_table.align = u"r"
1816 txt_table.align[u"Name"] = u"l"
1817 print(f"{txt_table.get_string()}\n")