1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in pandas' DataFrames,
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
38 from robot.api import ExecutionResult, ResultVisitor
39 from robot import errors
41 from resources.libraries.python import jumpavg
42 from input_data_files import download_and_unzip_data_file
45 # Separator used in file names
49 class ExecutionChecker(ResultVisitor):
50 """Class to traverse through the test suite structure.
52 The functionality implemented in this class generates a json structure:
58 "generated": "Timestamp",
59 "version": "SUT version",
60 "job": "Jenkins job name",
61 "build": "Information about the build"
64 "Suite long name 1": {
66 "doc": "Suite 1 documentation",
67 "parent": "Suite 1 parent",
68 "level": "Level of the suite in the suite hierarchy"
70 "Suite long name N": {
72 "doc": "Suite N documentation",
73 "parent": "Suite 2 parent",
74 "level": "Level of the suite in the suite hierarchy"
81 "parent": "Name of the parent of the test",
82 "doc": "Test documentation",
83 "msg": "Test message",
84 "conf-history": "DUT1 and DUT2 VAT History",
85 "show-run": "Show Run",
86 "tags": ["tag 1", "tag 2", "tag n"],
88 "status": "PASS" | "FAIL",
134 "parent": "Name of the parent of the test",
135 "doc": "Test documentation",
136 "msg": "Test message",
137 "tags": ["tag 1", "tag 2", "tag n"],
139 "status": "PASS" | "FAIL",
146 "parent": "Name of the parent of the test",
147 "doc": "Test documentation",
148 "msg": "Test message",
149 "tags": ["tag 1", "tag 2", "tag n"],
150 "type": "MRR" | "BMRR",
151 "status": "PASS" | "FAIL",
153 "receive-rate": float,
154 # Average of a list, computed using AvgStdevStats.
155 # In CSIT-1180, replace with List[float].
169 "metadata": { # Optional
170 "version": "VPP version",
171 "job": "Jenkins job name",
172 "build": "Information about the build"
176 "doc": "Suite 1 documentation",
177 "parent": "Suite 1 parent",
178 "level": "Level of the suite in the suite hierarchy"
181 "doc": "Suite N documentation",
182 "parent": "Suite 2 parent",
183 "level": "Level of the suite in the suite hierarchy"
189 "parent": "Name of the parent of the test",
190 "doc": "Test documentation"
191 "msg": "Test message"
192 "tags": ["tag 1", "tag 2", "tag n"],
193 "conf-history": "DUT1 and DUT2 VAT History"
194 "show-run": "Show Run"
195 "status": "PASS" | "FAIL"
203 .. note:: ID is the lowercase full path to the test.
206 REGEX_PLR_RATE = re.compile(
207 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
208 r'PLRsearch upper bound::?\s(\d+.\d+)'
210 REGEX_NDRPDR_RATE = re.compile(
211 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
212 r'NDR_UPPER:\s(\d+.\d+).*\n'
213 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
214 r'PDR_UPPER:\s(\d+.\d+)'
216 REGEX_PERF_MSG_INFO = re.compile(
217 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
218 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
219 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
220 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
221 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
223 # TODO: Remove when not needed
224 REGEX_NDRPDR_LAT_BASE = re.compile(
225 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
226 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
228 REGEX_NDRPDR_LAT = re.compile(
229 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
230 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
231 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
232 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
233 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
234 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
236 # TODO: Remove when not needed
237 REGEX_NDRPDR_LAT_LONG = re.compile(
238 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
239 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
240 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
241 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
242 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
246 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
248 REGEX_VERSION_VPP = re.compile(
249 r"(return STDOUT Version:\s*|"
250 r"VPP Version:\s*|VPP version:\s*)(.*)"
252 REGEX_VERSION_DPDK = re.compile(
253 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
255 REGEX_TCP = re.compile(
256 r'Total\s(rps|cps|throughput):\s(\d*).*$'
258 REGEX_MRR = re.compile(
259 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
260 r'tx\s(\d*),\srx\s(\d*)'
262 REGEX_BMRR = re.compile(
263 r'Maximum Receive Rate trial results'
264 r' in packets per second: \[(.*)\]'
266 REGEX_RECONF_LOSS = re.compile(
267 r'Packets lost due to reconfig: (\d*)'
269 REGEX_RECONF_TIME = re.compile(
270 r'Implied time lost: (\d*.[\de-]*)'
272 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
274 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
276 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
278 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
280 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
282 def __init__(self, metadata, mapping, ignore):
285 :param metadata: Key-value pairs to be included in "metadata" part of
287 :param mapping: Mapping of the old names of test cases to the new
289 :param ignore: List of TCs to be ignored.
295 # Type of message to parse out from the test messages
296 self._msg_type = None
302 self._timestamp = None
304 # Testbed. The testbed is identified by TG node IP address.
307 # Mapping of TCs long names
308 self._mapping = mapping
311 self._ignore = ignore
313 # Number of PAPI History messages found:
315 # 1 - PAPI History of DUT1
316 # 2 - PAPI History of DUT2
317 self._conf_history_lookup_nr = 0
319 self._sh_run_counter = 0
321 # Test ID of currently processed test- the lowercase full path to the
325 # The main data structure
327 u"metadata": OrderedDict(),
328 u"suites": OrderedDict(),
329 u"tests": OrderedDict()
332 # Save the provided metadata
333 for key, val in metadata.items():
334 self._data[u"metadata"][key] = val
336 # Dictionary defining the methods used to parse different types of
339 u"timestamp": self._get_timestamp,
340 u"vpp-version": self._get_vpp_version,
341 u"dpdk-version": self._get_dpdk_version,
342 # TODO: Remove when not needed:
343 u"teardown-vat-history": self._get_vat_history,
344 u"teardown-papi-history": self._get_papi_history,
345 u"test-show-runtime": self._get_show_run,
346 u"testbed": self._get_testbed
351 """Getter - Data parsed from the XML file.
353 :returns: Data parsed from the XML file.
358 def _get_data_from_perf_test_msg(self, msg):
366 from message of NDRPDR performance tests.
368 :param msg: Message to be processed.
370 :returns: Processed message or original message if a problem occurs.
374 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
375 if not groups or groups.lastindex != 10:
380 u"ndr_low": float(groups.group(1)),
381 u"ndr_low_b": float(groups.group(2)),
382 u"pdr_low": float(groups.group(3)),
383 u"pdr_low_b": float(groups.group(4)),
384 u"pdr_lat_90_1": groups.group(5),
385 u"pdr_lat_90_2": groups.group(6),
386 u"pdr_lat_50_1": groups.group(7),
387 u"pdr_lat_50_2": groups.group(8),
388 u"pdr_lat_10_1": groups.group(9),
389 u"pdr_lat_10_2": groups.group(10),
391 except (AttributeError, IndexError, ValueError, KeyError):
394 def _process_lat(in_str_1, in_str_2):
395 """Extract min, avg, max values from latency string.
397 :param in_str_1: Latency string for one direction produced by robot
399 :param in_str_2: Latency string for second direction produced by
403 :returns: Processed latency string or empty string if a problem
405 :rtype: tuple(str, str)
407 in_list_1 = in_str_1.split('/', 3)
408 in_list_2 = in_str_2.split('/', 3)
410 if len(in_list_1) != 4 and len(in_list_2) != 4:
413 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
415 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
416 except hdrh.codec.HdrLengthException:
419 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
421 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
422 except hdrh.codec.HdrLengthException:
425 if hdr_lat_1 and hdr_lat_2:
426 hdr_lat_1_50 = hdr_lat_1.get_value_at_percentile(50.0)
427 hdr_lat_1_90 = hdr_lat_1.get_value_at_percentile(90.0)
428 hdr_lat_1_99 = hdr_lat_1.get_value_at_percentile(99.0)
429 hdr_lat_2_50 = hdr_lat_2.get_value_at_percentile(50.0)
430 hdr_lat_2_90 = hdr_lat_2.get_value_at_percentile(90.0)
431 hdr_lat_2_99 = hdr_lat_2.get_value_at_percentile(99.0)
433 if (hdr_lat_1_50 + hdr_lat_1_90 + hdr_lat_1_99 +
434 hdr_lat_2_50 + hdr_lat_2_90 + hdr_lat_2_99):
436 f"{hdr_lat_1_50} {hdr_lat_1_90} {hdr_lat_1_99} , "
437 f"{hdr_lat_2_50} {hdr_lat_2_90} {hdr_lat_2_99}"
443 pdr_lat_10 = _process_lat(data[u'pdr_lat_10_1'],
444 data[u'pdr_lat_10_2'])
445 pdr_lat_50 = _process_lat(data[u'pdr_lat_50_1'],
446 data[u'pdr_lat_50_2'])
447 pdr_lat_90 = _process_lat(data[u'pdr_lat_90_1'],
448 data[u'pdr_lat_90_2'])
449 pdr_lat_10 = f"\n3. {pdr_lat_10}" if pdr_lat_10 else u""
450 pdr_lat_50 = f"\n4. {pdr_lat_50}" if pdr_lat_50 else u""
451 pdr_lat_90 = f"\n5. {pdr_lat_90}" if pdr_lat_90 else u""
455 f"1. {(data[u'ndr_low'] / 1e6):.2f} {data[u'ndr_low_b']:.2f}"
456 f"\n2. {(data[u'pdr_low'] / 1e6):.2f} {data[u'pdr_low_b']:.2f}"
462 except (AttributeError, IndexError, ValueError, KeyError):
465 def _get_testbed(self, msg):
466 """Called when extraction of testbed IP is required.
467 The testbed is identified by TG node IP address.
469 :param msg: Message to process.
474 if msg.message.count(u"Setup of TG node") or \
475 msg.message.count(u"Setup of node TG host"):
476 reg_tg_ip = re.compile(
477 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
479 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
480 except (KeyError, ValueError, IndexError, AttributeError):
483 self._data[u"metadata"][u"testbed"] = self._testbed
484 self._msg_type = None
486 def _get_vpp_version(self, msg):
487 """Called when extraction of VPP version is required.
489 :param msg: Message to process.
494 if msg.message.count(u"return STDOUT Version:") or \
495 msg.message.count(u"VPP Version:") or \
496 msg.message.count(u"VPP version:"):
497 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
499 self._data[u"metadata"][u"version"] = self._version
500 self._msg_type = None
502 def _get_dpdk_version(self, msg):
503 """Called when extraction of DPDK version is required.
505 :param msg: Message to process.
510 if msg.message.count(u"DPDK Version:"):
512 self._version = str(re.search(
513 self.REGEX_VERSION_DPDK, msg.message).group(2))
514 self._data[u"metadata"][u"version"] = self._version
518 self._msg_type = None
520 def _get_timestamp(self, msg):
521 """Called when extraction of timestamp is required.
523 :param msg: Message to process.
528 self._timestamp = msg.timestamp[:14]
529 self._data[u"metadata"][u"generated"] = self._timestamp
530 self._msg_type = None
532 def _get_vat_history(self, msg):
533 """Called when extraction of VAT command history is required.
535 TODO: Remove when not needed.
537 :param msg: Message to process.
541 if msg.message.count(u"VAT command history:"):
542 self._conf_history_lookup_nr += 1
543 if self._conf_history_lookup_nr == 1:
544 self._data[u"tests"][self._test_id][u"conf-history"] = str()
546 self._msg_type = None
547 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
548 r"VAT command history:", u"",
549 msg.message, count=1).replace(u'\n', u' |br| ').\
552 self._data[u"tests"][self._test_id][u"conf-history"] += (
553 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
556 def _get_papi_history(self, msg):
557 """Called when extraction of PAPI command history is required.
559 :param msg: Message to process.
563 if msg.message.count(u"PAPI command history:"):
564 self._conf_history_lookup_nr += 1
565 if self._conf_history_lookup_nr == 1:
566 self._data[u"tests"][self._test_id][u"conf-history"] = str()
568 self._msg_type = None
569 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
570 r"PAPI command history:", u"",
571 msg.message, count=1).replace(u'\n', u' |br| ').\
573 self._data[u"tests"][self._test_id][u"conf-history"] += (
574 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
577 def _get_show_run(self, msg):
578 """Called when extraction of VPP operational data (output of CLI command
579 Show Runtime) is required.
581 :param msg: Message to process.
586 if not msg.message.count(u"stats runtime"):
590 if self._sh_run_counter > 1:
593 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
594 self._data[u"tests"][self._test_id][u"show-run"] = dict()
596 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
600 host = groups.group(1)
601 except (AttributeError, IndexError):
604 sock = groups.group(2)
605 except (AttributeError, IndexError):
608 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
609 replace(u"'", u'"').replace(u'b"', u'"').
610 replace(u'u"', u'"').split(u":", 1)[1])
613 threads_nr = len(runtime[0][u"clocks"])
614 except (IndexError, KeyError):
617 dut = u"DUT{nr}".format(
618 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
623 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
627 for idx in range(threads_nr):
628 if item[u"vectors"][idx] > 0:
629 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
630 elif item[u"calls"][idx] > 0:
631 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
632 elif item[u"suspends"][idx] > 0:
633 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
637 if item[u"calls"][idx] > 0:
638 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
642 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
643 int(item[u"suspends"][idx]):
644 oper[u"threads"][idx].append([
647 item[u"vectors"][idx],
648 item[u"suspends"][idx],
653 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
655 def _get_ndrpdr_throughput(self, msg):
656 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
659 :param msg: The test message to be parsed.
661 :returns: Parsed data as a dict and the status (PASS/FAIL).
662 :rtype: tuple(dict, str)
666 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
667 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
670 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
672 if groups is not None:
674 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
675 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
676 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
677 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
679 except (IndexError, ValueError):
682 return throughput, status
684 def _get_plr_throughput(self, msg):
685 """Get PLRsearch lower bound and PLRsearch upper bound from the test
688 :param msg: The test message to be parsed.
690 :returns: Parsed data as a dict and the status (PASS/FAIL).
691 :rtype: tuple(dict, str)
699 groups = re.search(self.REGEX_PLR_RATE, msg)
701 if groups is not None:
703 throughput[u"LOWER"] = float(groups.group(1))
704 throughput[u"UPPER"] = float(groups.group(2))
706 except (IndexError, ValueError):
709 return throughput, status
711 def _get_ndrpdr_latency(self, msg):
712 """Get LATENCY from the test message.
714 :param msg: The test message to be parsed.
716 :returns: Parsed data as a dict and the status (PASS/FAIL).
717 :rtype: tuple(dict, str)
727 u"direction1": copy.copy(latency_default),
728 u"direction2": copy.copy(latency_default)
731 u"direction1": copy.copy(latency_default),
732 u"direction2": copy.copy(latency_default)
735 u"direction1": copy.copy(latency_default),
736 u"direction2": copy.copy(latency_default)
739 u"direction1": copy.copy(latency_default),
740 u"direction2": copy.copy(latency_default)
743 u"direction1": copy.copy(latency_default),
744 u"direction2": copy.copy(latency_default)
747 u"direction1": copy.copy(latency_default),
748 u"direction2": copy.copy(latency_default)
752 # TODO: Rewrite when long and base are not needed
753 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
755 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
757 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
759 return latency, u"FAIL"
761 def process_latency(in_str):
762 """Return object with parsed latency values.
764 TODO: Define class for the return type.
766 :param in_str: Input string, min/avg/max/hdrh format.
768 :returns: Dict with corresponding keys, except hdrh float values.
770 :throws IndexError: If in_str does not have enough substrings.
771 :throws ValueError: If a substring does not convert to float.
773 in_list = in_str.split('/', 3)
776 u"min": float(in_list[0]),
777 u"avg": float(in_list[1]),
778 u"max": float(in_list[2]),
782 if len(in_list) == 4:
783 rval[u"hdrh"] = str(in_list[3])
788 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
789 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
790 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
791 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
792 if groups.lastindex == 4:
793 return latency, u"PASS"
794 except (IndexError, ValueError):
798 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
799 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
800 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
801 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
802 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
803 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
804 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
805 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
806 if groups.lastindex == 12:
807 return latency, u"PASS"
808 except (IndexError, ValueError):
811 # TODO: Remove when not needed
812 latency[u"NDR10"] = {
813 u"direction1": copy.copy(latency_default),
814 u"direction2": copy.copy(latency_default)
816 latency[u"NDR50"] = {
817 u"direction1": copy.copy(latency_default),
818 u"direction2": copy.copy(latency_default)
820 latency[u"NDR90"] = {
821 u"direction1": copy.copy(latency_default),
822 u"direction2": copy.copy(latency_default)
825 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
826 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
827 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
828 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
829 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
830 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
831 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
832 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
833 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
834 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
835 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
836 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
837 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
838 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
839 return latency, u"PASS"
840 except (IndexError, ValueError):
843 return latency, u"FAIL"
845 def visit_suite(self, suite):
846 """Implements traversing through the suite and its direct children.
848 :param suite: Suite to process.
852 if self.start_suite(suite) is not False:
853 suite.suites.visit(self)
854 suite.tests.visit(self)
855 self.end_suite(suite)
857 def start_suite(self, suite):
858 """Called when suite starts.
860 :param suite: Suite to process.
866 parent_name = suite.parent.name
867 except AttributeError:
870 doc_str = suite.doc.\
871 replace(u'"', u"'").\
872 replace(u'\n', u' ').\
873 replace(u'\r', u'').\
874 replace(u'*[', u' |br| *[').\
875 replace(u"*", u"**").\
876 replace(u' |br| *[', u'*[', 1)
878 self._data[u"suites"][suite.longname.lower().
880 replace(u" ", u"_")] = {
881 u"name": suite.name.lower(),
883 u"parent": parent_name,
884 u"level": len(suite.longname.split(u"."))
887 suite.keywords.visit(self)
889 def end_suite(self, suite):
890 """Called when suite ends.
892 :param suite: Suite to process.
897 def visit_test(self, test):
898 """Implements traversing through the test.
900 :param test: Test to process.
904 if self.start_test(test) is not False:
905 test.keywords.visit(self)
908 def start_test(self, test):
909 """Called when test starts.
911 :param test: Test to process.
916 self._sh_run_counter = 0
918 longname_orig = test.longname.lower()
920 # Check the ignore list
921 if longname_orig in self._ignore:
924 tags = [str(tag) for tag in test.tags]
927 # Change the TC long name and name if defined in the mapping table
928 longname = self._mapping.get(longname_orig, None)
929 if longname is not None:
930 name = longname.split(u'.')[-1]
932 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
936 longname = longname_orig
937 name = test.name.lower()
939 # Remove TC number from the TC long name (backward compatibility):
940 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
941 # Remove TC number from the TC name (not needed):
942 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
944 test_result[u"parent"] = test.parent.name.lower()
945 test_result[u"tags"] = tags
946 test_result["doc"] = test.doc.\
947 replace(u'"', u"'").\
948 replace(u'\n', u' ').\
949 replace(u'\r', u'').\
950 replace(u'[', u' |br| [').\
951 replace(u' |br| [', u'[', 1)
952 test_result[u"msg"] = self._get_data_from_perf_test_msg(test.message).\
953 replace(u'\n', u' |br| ').\
954 replace(u'\r', u'').\
956 test_result[u"type"] = u"FUNC"
957 test_result[u"status"] = test.status
959 if u"PERFTEST" in tags:
960 # Replace info about cores (e.g. -1c-) with the info about threads
961 # and cores (e.g. -1t1c-) in the long test case names and in the
962 # test case names if necessary.
963 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
967 for tag in test_result[u"tags"]:
968 groups = re.search(self.REGEX_TC_TAG, tag)
974 self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
975 f"-{tag_tc.lower()}-",
978 test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
979 f"-{tag_tc.lower()}-",
983 test_result[u"status"] = u"FAIL"
984 self._data[u"tests"][self._test_id] = test_result
986 f"The test {self._test_id} has no or more than one "
987 f"multi-threading tags.\n"
988 f"Tags: {test_result[u'tags']}"
992 if test.status == u"PASS":
993 if u"NDRPDR" in tags:
994 test_result[u"type"] = u"NDRPDR"
995 test_result[u"throughput"], test_result[u"status"] = \
996 self._get_ndrpdr_throughput(test.message)
997 test_result[u"latency"], test_result[u"status"] = \
998 self._get_ndrpdr_latency(test.message)
999 elif u"SOAK" in tags:
1000 test_result[u"type"] = u"SOAK"
1001 test_result[u"throughput"], test_result[u"status"] = \
1002 self._get_plr_throughput(test.message)
1003 elif u"TCP" in tags:
1004 test_result[u"type"] = u"TCP"
1005 groups = re.search(self.REGEX_TCP, test.message)
1006 test_result[u"result"] = int(groups.group(2))
1007 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1009 test_result[u"type"] = u"MRR"
1011 test_result[u"type"] = u"BMRR"
1013 test_result[u"result"] = dict()
1014 groups = re.search(self.REGEX_BMRR, test.message)
1015 if groups is not None:
1016 items_str = groups.group(1)
1017 items_float = [float(item.strip()) for item
1018 in items_str.split(",")]
1019 # Use whole list in CSIT-1180.
1020 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1021 test_result[u"result"][u"receive-rate"] = stats.avg
1023 groups = re.search(self.REGEX_MRR, test.message)
1024 test_result[u"result"][u"receive-rate"] = \
1025 float(groups.group(3)) / float(groups.group(1))
1026 elif u"RECONF" in tags:
1027 test_result[u"type"] = u"RECONF"
1028 test_result[u"result"] = None
1030 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1031 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1032 test_result[u"result"] = {
1033 u"loss": int(grps_loss.group(1)),
1034 u"time": float(grps_time.group(1))
1036 except (AttributeError, IndexError, ValueError, TypeError):
1037 test_result[u"status"] = u"FAIL"
1038 elif u"DEVICETEST" in tags:
1039 test_result[u"type"] = u"DEVICETEST"
1041 test_result[u"status"] = u"FAIL"
1042 self._data[u"tests"][self._test_id] = test_result
1045 self._data[u"tests"][self._test_id] = test_result
1047 def end_test(self, test):
1048 """Called when test ends.
1050 :param test: Test to process.
1055 def visit_keyword(self, keyword):
1056 """Implements traversing through the keyword and its child keywords.
1058 :param keyword: Keyword to process.
1059 :type keyword: Keyword
1062 if self.start_keyword(keyword) is not False:
1063 self.end_keyword(keyword)
1065 def start_keyword(self, keyword):
1066 """Called when keyword starts. Default implementation does nothing.
1068 :param keyword: Keyword to process.
1069 :type keyword: Keyword
1073 if keyword.type == u"setup":
1074 self.visit_setup_kw(keyword)
1075 elif keyword.type == u"teardown":
1076 self.visit_teardown_kw(keyword)
1078 self.visit_test_kw(keyword)
1079 except AttributeError:
1082 def end_keyword(self, keyword):
1083 """Called when keyword ends. Default implementation does nothing.
1085 :param keyword: Keyword to process.
1086 :type keyword: Keyword
1090 def visit_test_kw(self, test_kw):
1091 """Implements traversing through the test keyword and its child
1094 :param test_kw: Keyword to process.
1095 :type test_kw: Keyword
1098 for keyword in test_kw.keywords:
1099 if self.start_test_kw(keyword) is not False:
1100 self.visit_test_kw(keyword)
1101 self.end_test_kw(keyword)
1103 def start_test_kw(self, test_kw):
1104 """Called when test keyword starts. Default implementation does
1107 :param test_kw: Keyword to process.
1108 :type test_kw: Keyword
1111 if test_kw.name.count(u"Show Runtime On All Duts") or \
1112 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1113 self._msg_type = u"test-show-runtime"
1114 self._sh_run_counter += 1
1115 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1116 self._msg_type = u"dpdk-version"
1119 test_kw.messages.visit(self)
1121 def end_test_kw(self, test_kw):
1122 """Called when keyword ends. Default implementation does nothing.
1124 :param test_kw: Keyword to process.
1125 :type test_kw: Keyword
1129 def visit_setup_kw(self, setup_kw):
1130 """Implements traversing through the teardown keyword and its child
1133 :param setup_kw: Keyword to process.
1134 :type setup_kw: Keyword
1137 for keyword in setup_kw.keywords:
1138 if self.start_setup_kw(keyword) is not False:
1139 self.visit_setup_kw(keyword)
1140 self.end_setup_kw(keyword)
1142 def start_setup_kw(self, setup_kw):
1143 """Called when teardown keyword starts. Default implementation does
1146 :param setup_kw: Keyword to process.
1147 :type setup_kw: Keyword
1150 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1151 and not self._version:
1152 self._msg_type = u"vpp-version"
1153 elif setup_kw.name.count(u"Set Global Variable") \
1154 and not self._timestamp:
1155 self._msg_type = u"timestamp"
1156 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1157 self._msg_type = u"testbed"
1160 setup_kw.messages.visit(self)
1162 def end_setup_kw(self, setup_kw):
1163 """Called when keyword ends. Default implementation does nothing.
1165 :param setup_kw: Keyword to process.
1166 :type setup_kw: Keyword
1170 def visit_teardown_kw(self, teardown_kw):
1171 """Implements traversing through the teardown keyword and its child
1174 :param teardown_kw: Keyword to process.
1175 :type teardown_kw: Keyword
1178 for keyword in teardown_kw.keywords:
1179 if self.start_teardown_kw(keyword) is not False:
1180 self.visit_teardown_kw(keyword)
1181 self.end_teardown_kw(keyword)
1183 def start_teardown_kw(self, teardown_kw):
1184 """Called when teardown keyword starts
1186 :param teardown_kw: Keyword to process.
1187 :type teardown_kw: Keyword
1191 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1192 # TODO: Remove when not needed:
1193 self._conf_history_lookup_nr = 0
1194 self._msg_type = u"teardown-vat-history"
1195 teardown_kw.messages.visit(self)
1196 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1197 self._conf_history_lookup_nr = 0
1198 self._msg_type = u"teardown-papi-history"
1199 teardown_kw.messages.visit(self)
1201 def end_teardown_kw(self, teardown_kw):
1202 """Called when keyword ends. Default implementation does nothing.
1204 :param teardown_kw: Keyword to process.
1205 :type teardown_kw: Keyword
1209 def visit_message(self, msg):
1210 """Implements visiting the message.
1212 :param msg: Message to process.
1216 if self.start_message(msg) is not False:
1217 self.end_message(msg)
1219 def start_message(self, msg):
1220 """Called when message starts. Get required information from messages:
1223 :param msg: Message to process.
1229 self.parse_msg[self._msg_type](msg)
1231 def end_message(self, msg):
1232 """Called when message ends. Default implementation does nothing.
1234 :param msg: Message to process.
1243 The data is extracted from output.xml files generated by Jenkins jobs and
1244 stored in pandas' DataFrames.
1250 (as described in ExecutionChecker documentation)
1252 (as described in ExecutionChecker documentation)
1254 (as described in ExecutionChecker documentation)
1257 def __init__(self, spec):
1260 :param spec: Specification.
1261 :type spec: Specification
1268 self._input_data = pd.Series()
1272 """Getter - Input data.
1274 :returns: Input data
1275 :rtype: pandas.Series
1277 return self._input_data
1279 def metadata(self, job, build):
1280 """Getter - metadata
1282 :param job: Job which metadata we want.
1283 :param build: Build which metadata we want.
1287 :rtype: pandas.Series
1290 return self.data[job][build][u"metadata"]
1292 def suites(self, job, build):
1295 :param job: Job which suites we want.
1296 :param build: Build which suites we want.
1300 :rtype: pandas.Series
1303 return self.data[job][str(build)][u"suites"]
1305 def tests(self, job, build):
1308 :param job: Job which tests we want.
1309 :param build: Build which tests we want.
1313 :rtype: pandas.Series
1316 return self.data[job][build][u"tests"]
1318 def _parse_tests(self, job, build, log):
1319 """Process data from robot output.xml file and return JSON structured
1322 :param job: The name of job which build output data will be processed.
1323 :param build: The build which output data will be processed.
1324 :param log: List of log messages.
1327 :type log: list of tuples (severity, msg)
1328 :returns: JSON data structure.
1337 with open(build[u"file-name"], u'r') as data_file:
1339 result = ExecutionResult(data_file)
1340 except errors.DataError as err:
1342 (u"ERROR", f"Error occurred while parsing output.xml: "
1346 checker = ExecutionChecker(metadata, self._cfg.mapping,
1348 result.visit(checker)
1352 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1353 """Download and parse the input data file.
1355 :param pid: PID of the process executing this method.
1356 :param job: Name of the Jenkins job which generated the processed input
1358 :param build: Information about the Jenkins build which generated the
1359 processed input file.
1360 :param repeat: Repeat the download specified number of times if not
1371 (u"INFO", f" Processing the job/build: {job}: {build[u'build']}")
1379 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1387 f"It is not possible to download the input data file from the "
1388 f"job {job}, build {build[u'build']}, or it is damaged. "
1394 f" Processing data from the build {build[u'build']} ...")
1396 data = self._parse_tests(job, build, logs)
1400 f"Input data file from the job {job}, build "
1401 f"{build[u'build']} is damaged. Skipped.")
1404 state = u"processed"
1407 remove(build[u"file-name"])
1408 except OSError as err:
1410 ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1414 # If the time-period is defined in the specification file, remove all
1415 # files which are outside the time period.
1416 timeperiod = self._cfg.input.get(u"time-period", None)
1417 if timeperiod and data:
1419 timeperiod = timedelta(int(timeperiod))
1420 metadata = data.get(u"metadata", None)
1422 generated = metadata.get(u"generated", None)
1424 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1425 if (now - generated) > timeperiod:
1426 # Remove the data and the file:
1431 f" The build {job}/{build[u'build']} is "
1432 f"outdated, will be removed.")
1434 logs.append((u"INFO", u" Done."))
1436 for level, line in logs:
1437 if level == u"INFO":
1439 elif level == u"ERROR":
1441 elif level == u"DEBUG":
1443 elif level == u"CRITICAL":
1444 logging.critical(line)
1445 elif level == u"WARNING":
1446 logging.warning(line)
1448 return {u"data": data, u"state": state, u"job": job, u"build": build}
1450 def download_and_parse_data(self, repeat=1):
1451 """Download the input data files, parse input data from input files and
1452 store in pandas' Series.
1454 :param repeat: Repeat the download specified number of times if not
1459 logging.info(u"Downloading and parsing input files ...")
1461 for job, builds in self._cfg.builds.items():
1462 for build in builds:
1464 result = self._download_and_parse_build(job, build, repeat)
1465 build_nr = result[u"build"][u"build"]
1468 data = result[u"data"]
1469 build_data = pd.Series({
1470 u"metadata": pd.Series(
1471 list(data[u"metadata"].values()),
1472 index=list(data[u"metadata"].keys())
1474 u"suites": pd.Series(
1475 list(data[u"suites"].values()),
1476 index=list(data[u"suites"].keys())
1478 u"tests": pd.Series(
1479 list(data[u"tests"].values()),
1480 index=list(data[u"tests"].keys())
1484 if self._input_data.get(job, None) is None:
1485 self._input_data[job] = pd.Series()
1486 self._input_data[job][str(build_nr)] = build_data
1488 self._cfg.set_input_file_name(
1489 job, build_nr, result[u"build"][u"file-name"])
1491 self._cfg.set_input_state(job, build_nr, result[u"state"])
1494 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1495 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1497 logging.info(u"Done.")
1500 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1501 """Return the index of character in the string which is the end of tag.
1503 :param tag_filter: The string where the end of tag is being searched.
1504 :param start: The index where the searching is stated.
1505 :param closer: The character which is the tag closer.
1506 :type tag_filter: str
1509 :returns: The index of the tag closer.
1514 idx_opener = tag_filter.index(closer, start)
1515 return tag_filter.index(closer, idx_opener + 1)
1520 def _condition(tag_filter):
1521 """Create a conditional statement from the given tag filter.
1523 :param tag_filter: Filter based on tags from the element specification.
1524 :type tag_filter: str
1525 :returns: Conditional statement which can be evaluated.
1531 index = InputData._end_of_tag(tag_filter, index)
1535 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1537 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1538 continue_on_error=False):
1539 """Filter required data from the given jobs and builds.
1541 The output data structure is:
1545 - test (or suite) 1 ID:
1551 - test (or suite) n ID:
1558 :param element: Element which will use the filtered data.
1559 :param params: Parameters which will be included in the output. If None,
1560 all parameters are included.
1561 :param data: If not None, this data is used instead of data specified
1563 :param data_set: The set of data to be filtered: tests, suites,
1565 :param continue_on_error: Continue if there is error while reading the
1566 data. The Item will be empty then
1567 :type element: pandas.Series
1571 :type continue_on_error: bool
1572 :returns: Filtered data.
1573 :rtype pandas.Series
1577 if data_set == "suites":
1579 elif element[u"filter"] in (u"all", u"template"):
1582 cond = InputData._condition(element[u"filter"])
1583 logging.debug(f" Filter: {cond}")
1585 logging.error(u" No filter defined.")
1589 params = element.get(u"parameters", None)
1591 params.append(u"type")
1593 data_to_filter = data if data else element[u"data"]
1596 for job, builds in data_to_filter.items():
1597 data[job] = pd.Series()
1598 for build in builds:
1599 data[job][str(build)] = pd.Series()
1602 self.data[job][str(build)][data_set].items())
1604 if continue_on_error:
1608 for test_id, test_data in data_dict.items():
1609 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1610 data[job][str(build)][test_id] = pd.Series()
1612 for param, val in test_data.items():
1613 data[job][str(build)][test_id][param] = val
1615 for param in params:
1617 data[job][str(build)][test_id][param] =\
1620 data[job][str(build)][test_id][param] =\
1624 except (KeyError, IndexError, ValueError) as err:
1626 f"Missing mandatory parameter in the element specification: "
1630 except AttributeError as err:
1631 logging.error(repr(err))
1633 except SyntaxError as err:
1635 f"The filter {cond} is not correct. Check if all tags are "
1636 f"enclosed by apostrophes.\n{repr(err)}"
1640 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1641 continue_on_error=False):
1642 """Filter required data from the given jobs and builds.
1644 The output data structure is:
1648 - test (or suite) 1 ID:
1654 - test (or suite) n ID:
1661 :param element: Element which will use the filtered data.
1662 :param params: Parameters which will be included in the output. If None,
1663 all parameters are included.
1664 :param data_set: The set of data to be filtered: tests, suites,
1666 :param continue_on_error: Continue if there is error while reading the
1667 data. The Item will be empty then
1668 :type element: pandas.Series
1671 :type continue_on_error: bool
1672 :returns: Filtered data.
1673 :rtype pandas.Series
1676 include = element.get(u"include", None)
1678 logging.warning(u"No tests to include, skipping the element.")
1682 params = element.get(u"parameters", None)
1684 params.append(u"type")
1688 for job, builds in element[u"data"].items():
1689 data[job] = pd.Series()
1690 for build in builds:
1691 data[job][str(build)] = pd.Series()
1692 for test in include:
1694 reg_ex = re.compile(str(test).lower())
1695 for test_id in self.data[job][
1696 str(build)][data_set].keys():
1697 if re.match(reg_ex, str(test_id).lower()):
1698 test_data = self.data[job][
1699 str(build)][data_set][test_id]
1700 data[job][str(build)][test_id] = pd.Series()
1702 for param, val in test_data.items():
1703 data[job][str(build)][test_id]\
1706 for param in params:
1708 data[job][str(build)][
1712 data[job][str(build)][
1713 test_id][param] = u"No Data"
1714 except KeyError as err:
1715 logging.error(repr(err))
1716 if continue_on_error:
1721 except (KeyError, IndexError, ValueError) as err:
1723 f"Missing mandatory parameter in the element "
1724 f"specification: {repr(err)}"
1727 except AttributeError as err:
1728 logging.error(repr(err))
1732 def merge_data(data):
1733 """Merge data from more jobs and builds to a simple data structure.
1735 The output data structure is:
1737 - test (suite) 1 ID:
1743 - test (suite) n ID:
1746 :param data: Data to merge.
1747 :type data: pandas.Series
1748 :returns: Merged data.
1749 :rtype: pandas.Series
1752 logging.info(u" Merging data ...")
1754 merged_data = pd.Series()
1755 for builds in data.values:
1756 for item in builds.values:
1757 for item_id, item_data in item.items():
1758 merged_data[item_id] = item_data
1762 def print_all_oper_data(self):
1763 """Print all operational data to console.
1771 u"Cycles per Packet",
1772 u"Average Vector Size"
1775 for job in self._input_data.values:
1776 for build in job.values:
1777 for test_id, test_data in build[u"tests"].items():
1779 if test_data.get(u"show-run", None) is None:
1781 for dut_name, data in test_data[u"show-run"].items():
1782 if data.get(u"threads", None) is None:
1784 print(f"Host IP: {data.get(u'host', '')}, "
1785 f"Socket: {data.get(u'socket', '')}")
1786 for thread_nr, thread in data[u"threads"].items():
1787 txt_table = prettytable.PrettyTable(tbl_hdr)
1790 txt_table.add_row(row)
1792 if len(thread) == 0:
1795 avg = f", Average Vector Size per Node: " \
1796 f"{(avg / len(thread)):.2f}"
1797 th_name = u"main" if thread_nr == 0 \
1798 else f"worker_{thread_nr}"
1799 print(f"{dut_name}, {th_name}{avg}")
1800 txt_table.float_format = u".2"
1801 txt_table.align = u"r"
1802 txt_table.align[u"Name"] = u"l"
1803 print(f"{txt_table.get_string()}\n")