1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in pandas' DataFrames,
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
38 from robot.api import ExecutionResult, ResultVisitor
39 from robot import errors
41 from resources.libraries.python import jumpavg
42 from input_data_files import download_and_unzip_data_file
45 # Separator used in file names
49 class ExecutionChecker(ResultVisitor):
50 """Class to traverse through the test suite structure.
52 The functionality implemented in this class generates a json structure:
58 "generated": "Timestamp",
59 "version": "SUT version",
60 "job": "Jenkins job name",
61 "build": "Information about the build"
64 "Suite long name 1": {
66 "doc": "Suite 1 documentation",
67 "parent": "Suite 1 parent",
68 "level": "Level of the suite in the suite hierarchy"
70 "Suite long name N": {
72 "doc": "Suite N documentation",
73 "parent": "Suite 2 parent",
74 "level": "Level of the suite in the suite hierarchy"
81 "parent": "Name of the parent of the test",
82 "doc": "Test documentation",
83 "msg": "Test message",
84 "conf-history": "DUT1 and DUT2 VAT History",
85 "show-run": "Show Run",
86 "tags": ["tag 1", "tag 2", "tag n"],
88 "status": "PASS" | "FAIL",
134 "parent": "Name of the parent of the test",
135 "doc": "Test documentation",
136 "msg": "Test message",
137 "tags": ["tag 1", "tag 2", "tag n"],
139 "status": "PASS" | "FAIL",
146 "parent": "Name of the parent of the test",
147 "doc": "Test documentation",
148 "msg": "Test message",
149 "tags": ["tag 1", "tag 2", "tag n"],
150 "type": "MRR" | "BMRR",
151 "status": "PASS" | "FAIL",
153 "receive-rate": float,
154 # Average of a list, computed using AvgStdevStats.
155 # In CSIT-1180, replace with List[float].
169 "metadata": { # Optional
170 "version": "VPP version",
171 "job": "Jenkins job name",
172 "build": "Information about the build"
176 "doc": "Suite 1 documentation",
177 "parent": "Suite 1 parent",
178 "level": "Level of the suite in the suite hierarchy"
181 "doc": "Suite N documentation",
182 "parent": "Suite 2 parent",
183 "level": "Level of the suite in the suite hierarchy"
189 "parent": "Name of the parent of the test",
190 "doc": "Test documentation"
191 "msg": "Test message"
192 "tags": ["tag 1", "tag 2", "tag n"],
193 "conf-history": "DUT1 and DUT2 VAT History"
194 "show-run": "Show Run"
195 "status": "PASS" | "FAIL"
203 .. note:: ID is the lowercase full path to the test.
206 REGEX_PLR_RATE = re.compile(
207 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
208 r'PLRsearch upper bound::?\s(\d+.\d+)'
210 REGEX_NDRPDR_RATE = re.compile(
211 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
212 r'NDR_UPPER:\s(\d+.\d+).*\n'
213 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
214 r'PDR_UPPER:\s(\d+.\d+)'
216 REGEX_PERF_MSG_INFO = re.compile(
217 r'NDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
218 r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
219 r'NDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
220 r'PDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
221 r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
222 r'PDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*)'
224 # TODO: Remove when not needed
225 REGEX_NDRPDR_LAT_BASE = re.compile(
226 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
227 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
229 REGEX_NDRPDR_LAT = re.compile(
230 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
231 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
232 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
233 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
234 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
235 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
237 # TODO: Remove when not needed
238 REGEX_NDRPDR_LAT_LONG = re.compile(
239 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
240 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
241 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
242 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
246 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
247 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
249 REGEX_VERSION_VPP = re.compile(
250 r"(return STDOUT Version:\s*|"
251 r"VPP Version:\s*|VPP version:\s*)(.*)"
253 REGEX_VERSION_DPDK = re.compile(
254 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
256 REGEX_TCP = re.compile(
257 r'Total\s(rps|cps|throughput):\s(\d*).*$'
259 REGEX_MRR = re.compile(
260 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
261 r'tx\s(\d*),\srx\s(\d*)'
263 REGEX_BMRR = re.compile(
264 r'Maximum Receive Rate trial results'
265 r' in packets per second: \[(.*)\]'
267 REGEX_RECONF_LOSS = re.compile(
268 r'Packets lost due to reconfig: (\d*)'
270 REGEX_RECONF_TIME = re.compile(
271 r'Implied time lost: (\d*.[\de-]*)'
273 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
275 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
277 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
279 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
281 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
283 def __init__(self, metadata, mapping, ignore):
286 :param metadata: Key-value pairs to be included in "metadata" part of
288 :param mapping: Mapping of the old names of test cases to the new
290 :param ignore: List of TCs to be ignored.
296 # Type of message to parse out from the test messages
297 self._msg_type = None
303 self._timestamp = None
305 # Testbed. The testbed is identified by TG node IP address.
308 # Mapping of TCs long names
309 self._mapping = mapping
312 self._ignore = ignore
314 # Number of PAPI History messages found:
316 # 1 - PAPI History of DUT1
317 # 2 - PAPI History of DUT2
318 self._conf_history_lookup_nr = 0
320 self._sh_run_counter = 0
322 # Test ID of currently processed test- the lowercase full path to the
326 # The main data structure
328 u"metadata": OrderedDict(),
329 u"suites": OrderedDict(),
330 u"tests": OrderedDict()
333 # Save the provided metadata
334 for key, val in metadata.items():
335 self._data[u"metadata"][key] = val
337 # Dictionary defining the methods used to parse different types of
340 u"timestamp": self._get_timestamp,
341 u"vpp-version": self._get_vpp_version,
342 u"dpdk-version": self._get_dpdk_version,
343 # TODO: Remove when not needed:
344 u"teardown-vat-history": self._get_vat_history,
345 u"teardown-papi-history": self._get_papi_history,
346 u"test-show-runtime": self._get_show_run,
347 u"testbed": self._get_testbed
352 """Getter - Data parsed from the XML file.
354 :returns: Data parsed from the XML file.
359 def _get_data_from_perf_test_msg(self, msg):
367 from message of NDRPDR performance tests.
369 :param msg: Message to be processed.
371 :returns: Processed message or original message if a problem occurs.
375 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
376 if not groups or groups.lastindex != 20:
381 u"ndr_low": float(groups.group(1)),
382 u"ndr_low_unit": groups.group(2),
383 u"ndr_low_b": float(groups.group(3)),
384 u"ndr_low_b_unit": groups.group(4),
385 u"ndr_lat_1": groups.group(5),
386 u"ndr_lat_2": groups.group(6),
387 u"ndr_up": float(groups.group(7)),
388 u"ndr_up_unit": groups.group(8),
389 u"ndr_up_b": float(groups.group(9)),
390 u"ndr_up_b_unit": groups.group(10),
391 u"pdr_low": float(groups.group(11)),
392 u"pdr_low_unit": groups.group(12),
393 u"pdr_low_b": float(groups.group(13)),
394 u"pdr_low_b_unit": groups.group(14),
395 u"pdr_lat_1": groups.group(15),
396 u"pdr_lat_2": groups.group(16),
397 u"pdr_up": float(groups.group(17)),
398 u"pdr_up_unit": groups.group(18),
399 u"pdr_up_b": float(groups.group(19)),
400 u"pdr_up_b_unit": groups.group(20)
402 except (AttributeError, IndexError, ValueError, KeyError):
405 def _process_lat(in_str_1, in_str_2):
406 """Extract min, avg, max values from latency string.
408 :param in_str_1: Latency string for one direction produced by robot
410 :param in_str_2: Latency string for second direction produced by
414 :returns: Processed latency string or original string if a problem
416 :rtype: tuple(str, str)
418 in_list_1 = in_str_1.split('/', 3)
419 if len(in_list_1) < 3:
420 return u"Not Measured.", u"Not Measured."
422 in_list_2 = in_str_2.split('/', 3)
423 if len(in_list_2) < 3:
424 return u"Not Measured.", u"Not Measured."
427 if len(in_list_1) == 4:
428 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
430 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
431 except hdrh.codec.HdrLengthException:
434 if len(in_list_2) == 4:
435 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
437 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
438 except hdrh.codec.HdrLengthException:
441 hdr_lat = u"Not Measured."
442 if hdr_lat_1 and hdr_lat_2:
444 f"50%/90%/99%/99.9%, "
445 f"{hdr_lat_1.get_value_at_percentile(50.0)}/"
446 f"{hdr_lat_1.get_value_at_percentile(90.0)}/"
447 f"{hdr_lat_1.get_value_at_percentile(99.0)}/"
448 f"{hdr_lat_1.get_value_at_percentile(99.9)}, "
449 f"{hdr_lat_2.get_value_at_percentile(50.0)}/"
450 f"{hdr_lat_2.get_value_at_percentile(90.0)}/"
451 f"{hdr_lat_2.get_value_at_percentile(99.0)}/"
452 f"{hdr_lat_2.get_value_at_percentile(99.9)} "
458 f"{in_list_1[0]}/{in_list_1[1]}/{in_list_1[2]}, "
459 f"{in_list_2[0]}/{in_list_2[1]}/{in_list_2[2]} uSec.",
464 pdr_lat = _process_lat(data[u'pdr_lat_1'], data[u'pdr_lat_2'])
465 ndr_lat = _process_lat(data[u'ndr_lat_1'], data[u'ndr_lat_2'])
467 f"NDR Throughput: {(data[u'ndr_low'] / 1e6):.2f} "
468 f"M{data[u'ndr_low_unit']}, "
469 f"{data[u'ndr_low_b']:.2f} {data[u'ndr_low_b_unit']}.\n"
470 f"One-Way Latency at NDR: {ndr_lat[0]}\n"
471 f"One-Way Latency at NDR by percentiles: {ndr_lat[1]}\n"
472 f"PDR Throughput: {(data[u'pdr_low'] / 1e6):.2f} "
473 f"M{data[u'pdr_low_unit']}, "
474 f"{data[u'pdr_low_b']:.2f} {data[u'pdr_low_b_unit']}.\n"
475 f"One-Way Latency at PDR: {pdr_lat[0]}\n"
476 f"One-Way Latency at PDR by percentiles: {pdr_lat[1]}"
478 except (AttributeError, IndexError, ValueError, KeyError):
481 def _get_testbed(self, msg):
482 """Called when extraction of testbed IP is required.
483 The testbed is identified by TG node IP address.
485 :param msg: Message to process.
490 if msg.message.count(u"Setup of TG node") or \
491 msg.message.count(u"Setup of node TG host"):
492 reg_tg_ip = re.compile(
493 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
495 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
496 except (KeyError, ValueError, IndexError, AttributeError):
499 self._data[u"metadata"][u"testbed"] = self._testbed
500 self._msg_type = None
502 def _get_vpp_version(self, msg):
503 """Called when extraction of VPP version is required.
505 :param msg: Message to process.
510 if msg.message.count(u"return STDOUT Version:") or \
511 msg.message.count(u"VPP Version:") or \
512 msg.message.count(u"VPP version:"):
513 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
515 self._data[u"metadata"][u"version"] = self._version
516 self._msg_type = None
518 def _get_dpdk_version(self, msg):
519 """Called when extraction of DPDK version is required.
521 :param msg: Message to process.
526 if msg.message.count(u"DPDK Version:"):
528 self._version = str(re.search(
529 self.REGEX_VERSION_DPDK, msg.message).group(2))
530 self._data[u"metadata"][u"version"] = self._version
534 self._msg_type = None
536 def _get_timestamp(self, msg):
537 """Called when extraction of timestamp is required.
539 :param msg: Message to process.
544 self._timestamp = msg.timestamp[:14]
545 self._data[u"metadata"][u"generated"] = self._timestamp
546 self._msg_type = None
548 def _get_vat_history(self, msg):
549 """Called when extraction of VAT command history is required.
551 TODO: Remove when not needed.
553 :param msg: Message to process.
557 if msg.message.count(u"VAT command history:"):
558 self._conf_history_lookup_nr += 1
559 if self._conf_history_lookup_nr == 1:
560 self._data[u"tests"][self._test_id][u"conf-history"] = str()
562 self._msg_type = None
563 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
564 r"VAT command history:", u"",
565 msg.message, count=1).replace(u'\n', u' |br| ').\
568 self._data[u"tests"][self._test_id][u"conf-history"] += (
569 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
572 def _get_papi_history(self, msg):
573 """Called when extraction of PAPI command history is required.
575 :param msg: Message to process.
579 if msg.message.count(u"PAPI command history:"):
580 self._conf_history_lookup_nr += 1
581 if self._conf_history_lookup_nr == 1:
582 self._data[u"tests"][self._test_id][u"conf-history"] = str()
584 self._msg_type = None
585 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
586 r"PAPI command history:", u"",
587 msg.message, count=1).replace(u'\n', u' |br| ').\
589 self._data[u"tests"][self._test_id][u"conf-history"] += (
590 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
593 def _get_show_run(self, msg):
594 """Called when extraction of VPP operational data (output of CLI command
595 Show Runtime) is required.
597 :param msg: Message to process.
602 if not msg.message.count(u"stats runtime"):
605 self._sh_run_counter += 1
608 if self._sh_run_counter > 1:
611 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
612 self._data[u"tests"][self._test_id][u"show-run"] = dict()
614 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
618 host = groups.group(1)
619 except (AttributeError, IndexError):
622 sock = groups.group(2)
623 except (AttributeError, IndexError):
626 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
627 replace(u"'", u'"').replace(u'b"', u'"').
628 replace(u'u"', u'"').split(u":", 1)[1])
631 threads_nr = len(runtime[0][u"clocks"])
632 except (IndexError, KeyError):
635 dut = u"DUT{nr}".format(
636 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
641 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
645 for idx in range(threads_nr):
646 if item[u"vectors"][idx] > 0:
647 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
648 elif item[u"calls"][idx] > 0:
649 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
650 elif item[u"suspends"][idx] > 0:
651 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
655 if item[u"calls"][idx] > 0:
656 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
660 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
661 int(item[u"suspends"][idx]):
662 oper[u"threads"][idx].append([
665 item[u"vectors"][idx],
666 item[u"suspends"][idx],
671 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
673 def _get_ndrpdr_throughput(self, msg):
674 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
677 :param msg: The test message to be parsed.
679 :returns: Parsed data as a dict and the status (PASS/FAIL).
680 :rtype: tuple(dict, str)
684 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
685 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
688 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
690 if groups is not None:
692 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
693 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
694 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
695 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
697 except (IndexError, ValueError):
700 return throughput, status
702 def _get_plr_throughput(self, msg):
703 """Get PLRsearch lower bound and PLRsearch upper bound from the test
706 :param msg: The test message to be parsed.
708 :returns: Parsed data as a dict and the status (PASS/FAIL).
709 :rtype: tuple(dict, str)
717 groups = re.search(self.REGEX_PLR_RATE, msg)
719 if groups is not None:
721 throughput[u"LOWER"] = float(groups.group(1))
722 throughput[u"UPPER"] = float(groups.group(2))
724 except (IndexError, ValueError):
727 return throughput, status
729 def _get_ndrpdr_latency(self, msg):
730 """Get LATENCY from the test message.
732 :param msg: The test message to be parsed.
734 :returns: Parsed data as a dict and the status (PASS/FAIL).
735 :rtype: tuple(dict, str)
745 u"direction1": copy.copy(latency_default),
746 u"direction2": copy.copy(latency_default)
749 u"direction1": copy.copy(latency_default),
750 u"direction2": copy.copy(latency_default)
753 u"direction1": copy.copy(latency_default),
754 u"direction2": copy.copy(latency_default)
757 u"direction1": copy.copy(latency_default),
758 u"direction2": copy.copy(latency_default)
761 u"direction1": copy.copy(latency_default),
762 u"direction2": copy.copy(latency_default)
765 u"direction1": copy.copy(latency_default),
766 u"direction2": copy.copy(latency_default)
770 # TODO: Rewrite when long and base are not needed
771 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
773 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
775 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
777 return latency, u"FAIL"
779 def process_latency(in_str):
780 """Return object with parsed latency values.
782 TODO: Define class for the return type.
784 :param in_str: Input string, min/avg/max/hdrh format.
786 :returns: Dict with corresponding keys, except hdrh float values.
788 :throws IndexError: If in_str does not have enough substrings.
789 :throws ValueError: If a substring does not convert to float.
791 in_list = in_str.split('/', 3)
794 u"min": float(in_list[0]),
795 u"avg": float(in_list[1]),
796 u"max": float(in_list[2]),
800 if len(in_list) == 4:
801 rval[u"hdrh"] = str(in_list[3])
806 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
807 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
808 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
809 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
810 if groups.lastindex == 4:
811 return latency, u"PASS"
812 except (IndexError, ValueError):
816 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
817 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
818 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
819 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
820 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
821 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
822 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
823 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
824 if groups.lastindex == 12:
825 return latency, u"PASS"
826 except (IndexError, ValueError):
829 # TODO: Remove when not needed
830 latency[u"NDR10"] = {
831 u"direction1": copy.copy(latency_default),
832 u"direction2": copy.copy(latency_default)
834 latency[u"NDR50"] = {
835 u"direction1": copy.copy(latency_default),
836 u"direction2": copy.copy(latency_default)
838 latency[u"NDR90"] = {
839 u"direction1": copy.copy(latency_default),
840 u"direction2": copy.copy(latency_default)
843 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
844 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
845 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
846 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
847 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
848 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
849 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
850 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
851 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
852 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
853 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
854 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
855 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
856 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
857 return latency, u"PASS"
858 except (IndexError, ValueError):
861 return latency, u"FAIL"
863 def visit_suite(self, suite):
864 """Implements traversing through the suite and its direct children.
866 :param suite: Suite to process.
870 if self.start_suite(suite) is not False:
871 suite.suites.visit(self)
872 suite.tests.visit(self)
873 self.end_suite(suite)
875 def start_suite(self, suite):
876 """Called when suite starts.
878 :param suite: Suite to process.
884 parent_name = suite.parent.name
885 except AttributeError:
888 doc_str = suite.doc.\
889 replace(u'"', u"'").\
890 replace(u'\n', u' ').\
891 replace(u'\r', u'').\
892 replace(u'*[', u' |br| *[').\
893 replace(u"*", u"**").\
894 replace(u' |br| *[', u'*[', 1)
896 self._data[u"suites"][suite.longname.lower().
898 replace(u" ", u"_")] = {
899 u"name": suite.name.lower(),
901 u"parent": parent_name,
902 u"level": len(suite.longname.split(u"."))
905 suite.keywords.visit(self)
907 def end_suite(self, suite):
908 """Called when suite ends.
910 :param suite: Suite to process.
915 def visit_test(self, test):
916 """Implements traversing through the test.
918 :param test: Test to process.
922 if self.start_test(test) is not False:
923 test.keywords.visit(self)
926 def start_test(self, test):
927 """Called when test starts.
929 :param test: Test to process.
934 self._sh_run_counter = 0
936 longname_orig = test.longname.lower()
938 # Check the ignore list
939 if longname_orig in self._ignore:
942 tags = [str(tag) for tag in test.tags]
945 # Change the TC long name and name if defined in the mapping table
946 longname = self._mapping.get(longname_orig, None)
947 if longname is not None:
948 name = longname.split(u'.')[-1]
950 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
954 longname = longname_orig
955 name = test.name.lower()
957 # Remove TC number from the TC long name (backward compatibility):
958 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
959 # Remove TC number from the TC name (not needed):
960 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
962 test_result[u"parent"] = test.parent.name.lower()
963 test_result[u"tags"] = tags
964 test_result["doc"] = test.doc.\
965 replace(u'"', u"'").\
966 replace(u'\n', u' ').\
967 replace(u'\r', u'').\
968 replace(u'[', u' |br| [').\
969 replace(u' |br| [', u'[', 1)
970 test_result[u"msg"] = self._get_data_from_perf_test_msg(test.message).\
971 replace(u'\n', u' |br| ').\
972 replace(u'\r', u'').\
974 test_result[u"type"] = u"FUNC"
975 test_result[u"status"] = test.status
977 if u"PERFTEST" in tags:
978 # Replace info about cores (e.g. -1c-) with the info about threads
979 # and cores (e.g. -1t1c-) in the long test case names and in the
980 # test case names if necessary.
981 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
985 for tag in test_result[u"tags"]:
986 groups = re.search(self.REGEX_TC_TAG, tag)
992 self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
993 f"-{tag_tc.lower()}-",
996 test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
997 f"-{tag_tc.lower()}-",
1001 test_result[u"status"] = u"FAIL"
1002 self._data[u"tests"][self._test_id] = test_result
1004 f"The test {self._test_id} has no or more than one "
1005 f"multi-threading tags.\n"
1006 f"Tags: {test_result[u'tags']}"
1010 if test.status == u"PASS":
1011 if u"NDRPDR" in tags:
1012 test_result[u"type"] = u"NDRPDR"
1013 test_result[u"throughput"], test_result[u"status"] = \
1014 self._get_ndrpdr_throughput(test.message)
1015 test_result[u"latency"], test_result[u"status"] = \
1016 self._get_ndrpdr_latency(test.message)
1017 elif u"SOAK" in tags:
1018 test_result[u"type"] = u"SOAK"
1019 test_result[u"throughput"], test_result[u"status"] = \
1020 self._get_plr_throughput(test.message)
1021 elif u"TCP" in tags:
1022 test_result[u"type"] = u"TCP"
1023 groups = re.search(self.REGEX_TCP, test.message)
1024 test_result[u"result"] = int(groups.group(2))
1025 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1027 test_result[u"type"] = u"MRR"
1029 test_result[u"type"] = u"BMRR"
1031 test_result[u"result"] = dict()
1032 groups = re.search(self.REGEX_BMRR, test.message)
1033 if groups is not None:
1034 items_str = groups.group(1)
1035 items_float = [float(item.strip()) for item
1036 in items_str.split(",")]
1037 # Use whole list in CSIT-1180.
1038 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1039 test_result[u"result"][u"receive-rate"] = stats.avg
1041 groups = re.search(self.REGEX_MRR, test.message)
1042 test_result[u"result"][u"receive-rate"] = \
1043 float(groups.group(3)) / float(groups.group(1))
1044 elif u"RECONF" in tags:
1045 test_result[u"type"] = u"RECONF"
1046 test_result[u"result"] = None
1048 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1049 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1050 test_result[u"result"] = {
1051 u"loss": int(grps_loss.group(1)),
1052 u"time": float(grps_time.group(1))
1054 except (AttributeError, IndexError, ValueError, TypeError):
1055 test_result[u"status"] = u"FAIL"
1057 test_result[u"status"] = u"FAIL"
1058 self._data[u"tests"][self._test_id] = test_result
1061 self._data[u"tests"][self._test_id] = test_result
1063 def end_test(self, test):
1064 """Called when test ends.
1066 :param test: Test to process.
1071 def visit_keyword(self, keyword):
1072 """Implements traversing through the keyword and its child keywords.
1074 :param keyword: Keyword to process.
1075 :type keyword: Keyword
1078 if self.start_keyword(keyword) is not False:
1079 self.end_keyword(keyword)
1081 def start_keyword(self, keyword):
1082 """Called when keyword starts. Default implementation does nothing.
1084 :param keyword: Keyword to process.
1085 :type keyword: Keyword
1089 if keyword.type == u"setup":
1090 self.visit_setup_kw(keyword)
1091 elif keyword.type == u"teardown":
1092 self.visit_teardown_kw(keyword)
1094 self.visit_test_kw(keyword)
1095 except AttributeError:
1098 def end_keyword(self, keyword):
1099 """Called when keyword ends. Default implementation does nothing.
1101 :param keyword: Keyword to process.
1102 :type keyword: Keyword
1106 def visit_test_kw(self, test_kw):
1107 """Implements traversing through the test keyword and its child
1110 :param test_kw: Keyword to process.
1111 :type test_kw: Keyword
1114 for keyword in test_kw.keywords:
1115 if self.start_test_kw(keyword) is not False:
1116 self.visit_test_kw(keyword)
1117 self.end_test_kw(keyword)
1119 def start_test_kw(self, test_kw):
1120 """Called when test keyword starts. Default implementation does
1123 :param test_kw: Keyword to process.
1124 :type test_kw: Keyword
1127 if test_kw.name.count(u"Show Runtime On All Duts") or \
1128 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1129 self._msg_type = u"test-show-runtime"
1130 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1131 self._msg_type = u"dpdk-version"
1134 test_kw.messages.visit(self)
1136 def end_test_kw(self, test_kw):
1137 """Called when keyword ends. Default implementation does nothing.
1139 :param test_kw: Keyword to process.
1140 :type test_kw: Keyword
1144 def visit_setup_kw(self, setup_kw):
1145 """Implements traversing through the teardown keyword and its child
1148 :param setup_kw: Keyword to process.
1149 :type setup_kw: Keyword
1152 for keyword in setup_kw.keywords:
1153 if self.start_setup_kw(keyword) is not False:
1154 self.visit_setup_kw(keyword)
1155 self.end_setup_kw(keyword)
1157 def start_setup_kw(self, setup_kw):
1158 """Called when teardown keyword starts. Default implementation does
1161 :param setup_kw: Keyword to process.
1162 :type setup_kw: Keyword
1165 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1166 and not self._version:
1167 self._msg_type = u"vpp-version"
1168 elif setup_kw.name.count(u"Set Global Variable") \
1169 and not self._timestamp:
1170 self._msg_type = u"timestamp"
1171 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1172 self._msg_type = u"testbed"
1175 setup_kw.messages.visit(self)
1177 def end_setup_kw(self, setup_kw):
1178 """Called when keyword ends. Default implementation does nothing.
1180 :param setup_kw: Keyword to process.
1181 :type setup_kw: Keyword
1185 def visit_teardown_kw(self, teardown_kw):
1186 """Implements traversing through the teardown keyword and its child
1189 :param teardown_kw: Keyword to process.
1190 :type teardown_kw: Keyword
1193 for keyword in teardown_kw.keywords:
1194 if self.start_teardown_kw(keyword) is not False:
1195 self.visit_teardown_kw(keyword)
1196 self.end_teardown_kw(keyword)
1198 def start_teardown_kw(self, teardown_kw):
1199 """Called when teardown keyword starts
1201 :param teardown_kw: Keyword to process.
1202 :type teardown_kw: Keyword
1206 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1207 # TODO: Remove when not needed:
1208 self._conf_history_lookup_nr = 0
1209 self._msg_type = u"teardown-vat-history"
1210 teardown_kw.messages.visit(self)
1211 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1212 self._conf_history_lookup_nr = 0
1213 self._msg_type = u"teardown-papi-history"
1214 teardown_kw.messages.visit(self)
1216 def end_teardown_kw(self, teardown_kw):
1217 """Called when keyword ends. Default implementation does nothing.
1219 :param teardown_kw: Keyword to process.
1220 :type teardown_kw: Keyword
1224 def visit_message(self, msg):
1225 """Implements visiting the message.
1227 :param msg: Message to process.
1231 if self.start_message(msg) is not False:
1232 self.end_message(msg)
1234 def start_message(self, msg):
1235 """Called when message starts. Get required information from messages:
1238 :param msg: Message to process.
1244 self.parse_msg[self._msg_type](msg)
1246 def end_message(self, msg):
1247 """Called when message ends. Default implementation does nothing.
1249 :param msg: Message to process.
1258 The data is extracted from output.xml files generated by Jenkins jobs and
1259 stored in pandas' DataFrames.
1265 (as described in ExecutionChecker documentation)
1267 (as described in ExecutionChecker documentation)
1269 (as described in ExecutionChecker documentation)
1272 def __init__(self, spec):
1275 :param spec: Specification.
1276 :type spec: Specification
1283 self._input_data = pd.Series()
1287 """Getter - Input data.
1289 :returns: Input data
1290 :rtype: pandas.Series
1292 return self._input_data
1294 def metadata(self, job, build):
1295 """Getter - metadata
1297 :param job: Job which metadata we want.
1298 :param build: Build which metadata we want.
1302 :rtype: pandas.Series
1305 return self.data[job][build][u"metadata"]
1307 def suites(self, job, build):
1310 :param job: Job which suites we want.
1311 :param build: Build which suites we want.
1315 :rtype: pandas.Series
1318 return self.data[job][str(build)][u"suites"]
1320 def tests(self, job, build):
1323 :param job: Job which tests we want.
1324 :param build: Build which tests we want.
1328 :rtype: pandas.Series
1331 return self.data[job][build][u"tests"]
# NOTE(review): elided extraction -- numbering jumps throughout (1343->1352,
# 1357->1361); missing here: docstring close, the try: around ExecutionResult,
# the error-path return, the metadata construction passed to ExecutionChecker,
# and the final return of checker.data (presumably; TODO confirm against the
# full file). Code lines kept byte-identical.
1333 def _parse_tests(self, job, build, log):
1334 """Process data from robot output.xml file and return JSON structured
1337 :param job: The name of job which build output data will be processed.
1338 :param build: The build which output data will be processed.
1339 :param log: List of log messages.
1342 :type log: list of tuples (severity, msg)
1343 :returns: JSON data structure.
# Parse the downloaded robot output.xml via Robot Framework's API.
1352 with open(build[u"file-name"], u'r') as data_file:
1354 result = ExecutionResult(data_file)
1355 except errors.DataError as err:
1357 (u"ERROR", f"Error occurred while parsing output.xml: "
# Walk the result tree with the visitor defined above in this file.
1361 checker = ExecutionChecker(metadata, self._cfg.mapping,
1363 result.visit(checker)
# NOTE(review): elided extraction -- many interior lines are missing (download
# retry loop, `state` initialization, the failure branches appending to
# `logs`, `now = dt.utcnow()`-style timestamp, and the data/file removal on
# outdated builds). Code lines kept byte-identical; comments only.
1367 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1368 """Download and parse the input data file.
1370 :param pid: PID of the process executing this method.
1371 :param job: Name of the Jenkins job which generated the processed input
1373 :param build: Information about the Jenkins build which generated the
1374 processed input file.
1375 :param repeat: Repeat the download specified number of times if not
1386 (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
# Fetch and unzip the job's output file; success flag drives the branches.
1394 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1402 f"It is not possible to download the input data file from the "
1403 f"job {job}, build {build[u'build']}, or it is damaged. "
1409 f"  Processing data from the build {build[u'build']} ...")
1411 data = self._parse_tests(job, build, logs)
1415 f"Input data file from the job {job}, build "
1416 f"{build[u'build']} is damaged. Skipped.")
1419 state = u"processed"
# Best-effort removal of the local file once parsed.
1422 remove(build[u"file-name"])
1423 except OSError as err:
1425 ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1429 # If the time-period is defined in the specification file, remove all
1430 # files which are outside the time period.
1431 timeperiod = self._cfg.input.get(u"time-period", None)
1432 if timeperiod and data:
1434 timeperiod = timedelta(int(timeperiod))
1435 metadata = data.get(u"metadata", None)
1437 generated = metadata.get(u"generated", None)
1439 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1440 if (now - generated) > timeperiod:
1441 # Remove the data and the file:
1446 f"  The build {job}/{build[u'build']} is "
1447 f"outdated, will be removed.")
1449 logs.append((u"INFO", u"  Done."))
# Replay the per-build log messages collected above at their severity.
1451 for level, line in logs:
1452 if level == u"INFO":
1454 elif level == u"ERROR":
1456 elif level == u"DEBUG":
1458 elif level == u"CRITICAL":
1459 logging.critical(line)
1460 elif level == u"WARNING":
1461 logging.warning(line)
1463 return {u"data": data, u"state": state, u"job": job, u"build": build}
# NOTE(review): elided extraction -- missing here: docstring close, the guard
# on result state before storing, the closing parentheses of the pd.Series
# construction, and the `mem_alloc =` assignment feeding the log line at 1510.
# Code lines kept byte-identical; comments only.
1465 def download_and_parse_data(self, repeat=1):
1466 """Download the input data files, parse input data from input files and
1467 store in pandas' Series.
1469 :param repeat: Repeat the download specified number of times if not
1474 logging.info(u"Downloading and parsing input files ...")
# Serially process every configured job/build.
1476 for job, builds in self._cfg.builds.items():
1477 for build in builds:
1479 result = self._download_and_parse_build(job, build, repeat)
1480 build_nr = result[u"build"][u"build"]
1483 data = result[u"data"]
# Wrap the parsed dicts in pandas Series keyed by their original keys.
1484 build_data = pd.Series({
1485 u"metadata": pd.Series(
1486 list(data[u"metadata"].values()),
1487 index=list(data[u"metadata"].keys())
1489 u"suites": pd.Series(
1490 list(data[u"suites"].values()),
1491 index=list(data[u"suites"].keys())
1493 u"tests": pd.Series(
1494 list(data[u"tests"].values()),
1495 index=list(data[u"tests"].keys())
# Builds are stored under str(build_nr) -- getters must index the same way.
1499 if self._input_data.get(job, None) is None:
1500 self._input_data[job] = pd.Series()
1501 self._input_data[job][str(build_nr)] = build_data
1503 self._cfg.set_input_file_name(
1504 job, build_nr, result[u"build"][u"file-name"])
1506 self._cfg.set_input_state(job, build_nr, result[u"state"])
1509 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1510 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1512 logging.info(u"Done.")
1515 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1516 """Return the index of character in the string which is the end of tag.
1518 :param tag_filter: The string where the end of tag is being searched.
1519 :param start: The index where the searching is stated.
1520 :param closer: The character which is the tag closer.
1521 :type tag_filter: str
1524 :returns: The index of the tag closer.
1529 idx_opener = tag_filter.index(closer, start)
1530 return tag_filter.index(closer, idx_opener + 1)
# NOTE(review): elided extraction -- the loop structure is missing here
# (presumably: initialize index, loop calling _end_of_tag, stop and return
# tag_filter when it yields None; TODO confirm). Code kept byte-identical.
1535 def _condition(tag_filter):
1536 """Create a conditional statement from the given tag filter.
1538 :param tag_filter: Filter based on tags from the element specification.
1539 :type tag_filter: str
1540 :returns: Conditional statement which can be evaluated.
# Find the closing quote of the next 'TAG' literal in the filter.
1546 index = InputData._end_of_tag(tag_filter, index)
# Splice " in tags" after the tag literal so the whole filter becomes a
# Python expression evaluable against a `tags` collection.
1550 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
# NOTE(review): elided extraction -- missing here among others: the branch
# bodies choosing `cond` for "suites"/"all"/"template", the early return when
# no filter is defined, `data = pd.Series()` initialization, the try: opener,
# and the per-param copy bodies at 1632/1635. Code kept byte-identical.
1552 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1553 continue_on_error=False):
1554 """Filter required data from the given jobs and builds.
1556 The output data structure is:
1560 - test (or suite) 1 ID:
1566 - test (or suite) n ID:
1573 :param element: Element which will use the filtered data.
1574 :param params: Parameters which will be included in the output. If None,
1575 all parameters are included.
1576 :param data: If not None, this data is used instead of data specified
1578 :param data_set: The set of data to be filtered: tests, suites,
1580 :param continue_on_error: Continue if there is error while reading the
1581 data. The Item will be empty then
1582 :type element: pandas.Series
1586 :type continue_on_error: bool
1587 :returns: Filtered data.
1588 :rtype pandas.Series
1592 if data_set == "suites":
1594 elif element[u"filter"] in (u"all", u"template"):
1597 cond = InputData._condition(element[u"filter"])
1598 logging.debug(f"   Filter: {cond}")
1600 logging.error(u"  No filter defined.")
1604 params = element.get(u"parameters", None)
1606 params.append(u"type")
1608 data_to_filter = data if data else element[u"data"]
1611 for job, builds in data_to_filter.items():
1612 data[job] = pd.Series()
1613 for build in builds:
1614 data[job][str(build)] = pd.Series()
1617 self.data[job][str(build)][data_set].items())
1619 if continue_on_error:
# NOTE(review): eval() on the spec-provided filter string -- acceptable only
# because the specification is trusted operator input; never feed untrusted
# data through this path.
1623 for test_id, test_data in data_dict.items():
1624 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1625 data[job][str(build)][test_id] = pd.Series()
1627 for param, val in test_data.items():
1628 data[job][str(build)][test_id][param] = val
1630 for param in params:
1632 data[job][str(build)][test_id][param] =\
1635 data[job][str(build)][test_id][param] =\
1639 except (KeyError, IndexError, ValueError) as err:
1641 f"Missing mandatory parameter in the element specification: "
1645 except AttributeError as err:
1646 logging.error(repr(err))
1648 except SyntaxError as err:
1650 f"The filter {cond} is not correct. Check if all tags are "
1651 f"enclosed by apostrophes.\n{repr(err)}"
# NOTE(review): elided extraction -- missing here among others: the early
# return after the "no tests to include" warning, `data = pd.Series()`
# initialization, the try: opener, and the param-copy statement bodies at
# 1718/1723. Code kept byte-identical; comments only.
1655 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1656 continue_on_error=False):
1657 """Filter required data from the given jobs and builds.
1659 The output data structure is:
1663 - test (or suite) 1 ID:
1669 - test (or suite) n ID:
1676 :param element: Element which will use the filtered data.
1677 :param params: Parameters which will be included in the output. If None,
1678 all parameters are included.
1679 :param data_set: The set of data to be filtered: tests, suites,
1681 :param continue_on_error: Continue if there is error while reading the
1682 data. The Item will be empty then
1683 :type element: pandas.Series
1686 :type continue_on_error: bool
1687 :returns: Filtered data.
1688 :rtype pandas.Series
1691 include = element.get(u"include", None)
1693 logging.warning(u"No tests to include, skipping the element.")
1697 params = element.get(u"parameters", None)
1699 params.append(u"type")
1703 for job, builds in element[u"data"].items():
1704 data[job] = pd.Series()
1705 for build in builds:
1706 data[job][str(build)] = pd.Series()
1707 for test in include:
# Each "include" entry is treated as a case-insensitive regex matched
# against the test ID from the start (re.match).
1709 reg_ex = re.compile(str(test).lower())
1710 for test_id in self.data[job][
1711 str(build)][data_set].keys():
1712 if re.match(reg_ex, str(test_id).lower()):
1713 test_data = self.data[job][
1714 str(build)][data_set][test_id]
1715 data[job][str(build)][test_id] = pd.Series()
1717 for param, val in test_data.items():
1718 data[job][str(build)][test_id]\
1721 for param in params:
1723 data[job][str(build)][
# Missing parameters degrade to the "No Data" placeholder.
1727 data[job][str(build)][
1728 test_id][param] = u"No Data"
1729 except KeyError as err:
1730 logging.error(repr(err))
1731 if continue_on_error:
1736 except (KeyError, IndexError, ValueError) as err:
1738 f"Missing mandatory parameter in the element "
1739 f"specification: {repr(err)}"
1742 except AttributeError as err:
1743 logging.error(repr(err))
def merge_data(data):
    """Merge data from more jobs and builds to a simple data structure.

    The job/build levels are flattened away: every test (suite) item from
    every build of every job is keyed directly by its item ID; when the same
    ID occurs in several builds, the one seen last wins.

    :param data: Data to merge.
    :type data: pandas.Series
    :returns: Merged data.
    :rtype: pandas.Series
    """
    logging.info(u"  Merging data ...")

    merged = pd.Series()
    for job_builds in data.values:
        for build_items in job_builds.values:
            for item_id, item_data in build_items.items():
                merged[item_id] = item_data
    return merged
# NOTE(review): elided extraction -- missing here among others: the docstring
# close, the first tbl_hdr entries, `continue` statements after the guards,
# the per-node row construction and `avg` accumulation feeding 1810. Code
# kept byte-identical; comments only.
1777 def print_all_oper_data(self):
1778 """Print all operational data to console.
1786 u"Cycles per Packet",
1787 u"Average Vector Size"
# Walk every job -> build -> test and print its "show-run" telemetry.
1790 for job in self._input_data.values:
1791 for build in job.values:
1792 for test_id, test_data in build[u"tests"].items():
1794 if test_data.get(u"show-run", None) is None:
1796 for dut_name, data in test_data[u"show-run"].items():
1797 if data.get(u"threads", None) is None:
1799 print(f"Host IP: {data.get(u'host', '')}, "
1800 f"Socket: {data.get(u'socket', '')}")
# One PrettyTable per VPP thread; thread 0 is "main", others "worker_N".
1801 for thread_nr, thread in data[u"threads"].items():
1802 txt_table = prettytable.PrettyTable(tbl_hdr)
1805 txt_table.add_row(row)
1807 if len(thread) == 0:
1810 avg = f", Average Vector Size per Node: " \
1811 f"{(avg / len(thread)):.2f}"
1812 th_name = u"main" if thread_nr == 0 \
1813 else f"worker_{thread_nr}"
1814 print(f"{dut_name}, {th_name}{avg}")
1815 txt_table.float_format = u".2"
1816 txt_table.align = u"r"
1817 txt_table.align[u"Name"] = u"l"
1818 print(f"{txt_table.get_string()}\n")