1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
38 from robot.api import ExecutionResult, ResultVisitor
39 from robot import errors
41 from resources.libraries.python import jumpavg
42 from input_data_files import download_and_unzip_data_file
45 # Separator used in file names
49 class ExecutionChecker(ResultVisitor):
50 """Class to traverse through the test suite structure.
52 The functionality implemented in this class generates a json structure:
58 "generated": "Timestamp",
59 "version": "SUT version",
60 "job": "Jenkins job name",
61 "build": "Information about the build"
64 "Suite long name 1": {
66 "doc": "Suite 1 documentation",
67 "parent": "Suite 1 parent",
68 "level": "Level of the suite in the suite hierarchy"
70 "Suite long name N": {
72 "doc": "Suite N documentation",
73 "parent": "Suite 2 parent",
74 "level": "Level of the suite in the suite hierarchy"
81 "parent": "Name of the parent of the test",
82 "doc": "Test documentation",
83 "msg": "Test message",
84 "conf-history": "DUT1 and DUT2 VAT History",
85 "show-run": "Show Run",
86 "tags": ["tag 1", "tag 2", "tag n"],
88 "status": "PASS" | "FAIL",
134 "parent": "Name of the parent of the test",
135 "doc": "Test documentation",
136 "msg": "Test message",
137 "tags": ["tag 1", "tag 2", "tag n"],
139 "status": "PASS" | "FAIL",
146 "parent": "Name of the parent of the test",
147 "doc": "Test documentation",
148 "msg": "Test message",
149 "tags": ["tag 1", "tag 2", "tag n"],
150 "type": "MRR" | "BMRR",
151 "status": "PASS" | "FAIL",
153 "receive-rate": float,
154 # Average of a list, computed using AvgStdevStats.
155 # In CSIT-1180, replace with List[float].
169 "metadata": { # Optional
170 "version": "VPP version",
171 "job": "Jenkins job name",
172 "build": "Information about the build"
176 "doc": "Suite 1 documentation",
177 "parent": "Suite 1 parent",
178 "level": "Level of the suite in the suite hierarchy"
181 "doc": "Suite N documentation",
182 "parent": "Suite 2 parent",
183 "level": "Level of the suite in the suite hierarchy"
189 "parent": "Name of the parent of the test",
190 "doc": "Test documentation"
191 "msg": "Test message"
192 "tags": ["tag 1", "tag 2", "tag n"],
193 "conf-history": "DUT1 and DUT2 VAT History"
194 "show-run": "Show Run"
195 "status": "PASS" | "FAIL"
203 .. note:: ID is the lowercase full path to the test.
# Class-level pre-compiled regexes used to pull results out of robot test
# messages. NOTE(review): throughout these patterns the dot in `\d+.\d+` is
# unescaped, so it matches ANY character, not just a decimal point (same for
# the IP-style `\d+.\d+.\d+.\d+` in REGEX_TC_PAPI_CLI). They work on the
# expected message shapes but are looser than intended — presumably should be
# `\d+\.\d+`; confirm against real messages before tightening.
206 REGEX_PLR_RATE = re.compile(
207 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
208 r'PLRsearch upper bound::?\s(\d+.\d+)'
210 REGEX_NDRPDR_RATE = re.compile(
211 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
212 r'NDR_UPPER:\s(\d+.\d+).*\n'
213 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
214 r'PDR_UPPER:\s(\d+.\d+)'
216 REGEX_PERF_MSG_INFO = re.compile(
217 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
218 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
219 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
220 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
221 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
223 # TODO: Remove when not needed
224 REGEX_NDRPDR_LAT_BASE = re.compile(
225 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
226 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
228 REGEX_NDRPDR_LAT = re.compile(
229 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
230 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
231 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
232 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
233 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
234 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
236 # TODO: Remove when not needed
237 REGEX_NDRPDR_LAT_LONG = re.compile(
238 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
239 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
240 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
241 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
242 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
246 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
248 REGEX_VERSION_VPP = re.compile(
249 r"(return STDOUT Version:\s*|"
250 r"VPP Version:\s*|VPP version:\s*)(.*)"
252 REGEX_VERSION_DPDK = re.compile(
# NOTE(review): accepts both "DPDK version:" and "DPDK Version:", but the
# guard in _get_dpdk_version only checks the capital-V form — see that method.
253 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
255 REGEX_TCP = re.compile(
256 r'Total\s(rps|cps|throughput):\s(\d*).*$'
258 REGEX_MRR = re.compile(
259 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
260 r'tx\s(\d*),\srx\s(\d*)'
262 REGEX_BMRR = re.compile(
263 r'Maximum Receive Rate trial results'
264 r' in packets per second: \[(.*)\]'
266 REGEX_RECONF_LOSS = re.compile(
267 r'Packets lost due to reconfig: (\d*)'
269 REGEX_RECONF_TIME = re.compile(
# NOTE(review): `(\d*.[\de-]*)` is very loose (unescaped dot, `\d*` may match
# empty) — it tolerates scientific notation like "1.5e-3" but would also match
# malformed input; confirm intent before tightening.
270 r'Implied time lost: (\d*.[\de-]*)'
272 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
274 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
276 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
278 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
# NOTE(review): trailing `.` inside the first capture group captures one extra
# character after the IP-like token — presumably unintended; verify callers.
280 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
282 def __init__(self, metadata, mapping, ignore):
285 :param metadata: Key-value pairs to be included in "metadata" part of
287 :param mapping: Mapping of the old names of test cases to the new
289 :param ignore: List of TCs to be ignored.
295 # Type of message to parse out from the test messages
296 self._msg_type = None
302 self._timestamp = None
304 # Testbed. The testbed is identified by TG node IP address.
307 # Mapping of TCs long names
308 self._mapping = mapping
311 self._ignore = ignore
313 # Number of PAPI History messages found:
315 # 1 - PAPI History of DUT1
316 # 2 - PAPI History of DUT2
317 self._conf_history_lookup_nr = 0
319 self._sh_run_counter = 0
321 # Test ID of currently processed test- the lowercase full path to the
325 # The main data structure
327 u"metadata": OrderedDict(),
328 u"suites": OrderedDict(),
329 u"tests": OrderedDict()
332 # Save the provided metadata
333 for key, val in metadata.items():
334 self._data[u"metadata"][key] = val
336 # Dictionary defining the methods used to parse different types of
339 u"timestamp": self._get_timestamp,
340 u"vpp-version": self._get_vpp_version,
341 u"dpdk-version": self._get_dpdk_version,
342 # TODO: Remove when not needed:
343 u"teardown-vat-history": self._get_vat_history,
344 u"teardown-papi-history": self._get_papi_history,
345 u"test-show-runtime": self._get_show_run,
346 u"testbed": self._get_testbed
351 """Getter - Data parsed from the XML file.
353 :returns: Data parsed from the XML file.
# Builds a short human-readable summary (numbered lines 1..5) out of the raw
# NDRPDR robot test message: throughput bounds first, then PDR latency
# percentile lines decoded from hdrh histograms.
358 def _get_data_from_perf_test_msg(self, msg):
366 from message of NDRPDR performance tests.
368 :param msg: Message to be processed.
370 :returns: Processed message or original message if a problem occurs.
# Expect exactly 10 capture groups from REGEX_PERF_MSG_INFO; anything else
# means the message is not an NDRPDR result in the expected shape.
374 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
375 if not groups or groups.lastindex != 10:
380 u"ndr_low": float(groups.group(1)),
381 u"ndr_low_b": float(groups.group(2)),
382 u"pdr_low": float(groups.group(3)),
383 u"pdr_low_b": float(groups.group(4)),
384 u"pdr_lat_90_1": groups.group(5),
385 u"pdr_lat_90_2": groups.group(6),
386 u"pdr_lat_50_1": groups.group(7),
387 u"pdr_lat_50_2": groups.group(8),
388 u"pdr_lat_10_1": groups.group(9),
389 u"pdr_lat_10_2": groups.group(10),
391 except (AttributeError, IndexError, ValueError, KeyError):
394 def _process_lat(in_str_1, in_str_2):
395 """Extract min, avg, max values from latency string.
397 :param in_str_1: Latency string for one direction produced by robot
399 :param in_str_2: Latency string for second direction produced by
403 :returns: Processed latency string or empty string if a problem
# NOTE(review): the visible success path builds a single formatted string,
# not a pair — this ":rtype: tuple(str, str)" looks stale; confirm.
405 :rtype: tuple(str, str)
# Each input is "min/avg/max/hdrh"; maxsplit=3 keeps any '/' inside the
# hdrh payload intact in element [3].
407 in_list_1 = in_str_1.split('/', 3)
408 in_list_2 = in_str_2.split('/', 3)
# NOTE(review): `and` means we proceed if only ONE of the two strings is
# malformed, and then index in_list_x[3] below — presumably should be `or`
# to bail out when either side lacks the hdrh part; confirm with real data.
410 if len(in_list_1) != 4 and len(in_list_2) != 4:
# NOTE(review): base64 padding formula looks wrong — `len % 4` appends the
# remainder itself, but correct padding is `(4 - len % 4) % 4` (e.g. a
# length-7 string needs 1 '=', not 3). The HdrLengthException handler below
# masks the failure; verify against hdrh decode behavior.
413 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
415 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
416 except hdrh.codec.HdrLengthException:
# Same suspect padding formula as above for the second direction.
419 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
421 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
422 except hdrh.codec.HdrLengthException:
# Only emit a value when BOTH directions decoded: P50/P90/P99 for direction
# one, then direction two, separated by " , ".
425 if hdr_lat_1 and hdr_lat_2:
427 f"{hdr_lat_1.get_value_at_percentile(50.0)} "
428 f"{hdr_lat_1.get_value_at_percentile(90.0)} "
429 f"{hdr_lat_1.get_value_at_percentile(99.0)} , "
430 f"{hdr_lat_2.get_value_at_percentile(50.0)} "
431 f"{hdr_lat_2.get_value_at_percentile(90.0)} "
432 f"{hdr_lat_2.get_value_at_percentile(99.0)}"
# Decode the three PDR latency levels (10/50/90 % of PDR rate), both
# directions each; an empty result drops that numbered line from the output.
438 pdr_lat_10 = _process_lat(data[u'pdr_lat_10_1'],
439 data[u'pdr_lat_10_2'])
440 pdr_lat_50 = _process_lat(data[u'pdr_lat_50_1'],
441 data[u'pdr_lat_50_2'])
442 pdr_lat_90 = _process_lat(data[u'pdr_lat_90_1'],
443 data[u'pdr_lat_90_2'])
444 pdr_lat_10 = f"\n3. {pdr_lat_10}" if pdr_lat_10 else u""
445 pdr_lat_50 = f"\n4. {pdr_lat_50}" if pdr_lat_50 else u""
446 pdr_lat_90 = f"\n5. {pdr_lat_90}" if pdr_lat_90 else u""
# Lines 1 and 2: NDR and PDR lower bounds scaled to Mpps plus the Gbps value.
450 f"1. {(data[u'ndr_low'] / 1e6):.2f} {data[u'ndr_low_b']:.2f}"
451 f"\n2. {(data[u'pdr_low'] / 1e6):.2f} {data[u'pdr_low_b']:.2f}"
457 except (AttributeError, IndexError, ValueError, KeyError):
460 def _get_testbed(self, msg):
461 """Called when extraction of testbed IP is required.
462 The testbed is identified by TG node IP address.
464 :param msg: Message to process.
469 if msg.message.count(u"Setup of TG node") or \
470 msg.message.count(u"Setup of node TG host"):
# NOTE(review): dots are unescaped, so this matches any char between the
# digit groups (should presumably be `\d{1,3}\.…`); also this pattern is
# recompiled on every matching message — could be a class-level constant
# like the other REGEX_* attributes.
471 reg_tg_ip = re.compile(
472 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
474 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
475 except (KeyError, ValueError, IndexError, AttributeError):
# Record the testbed identifier in metadata and stop looking for it.
478 self._data[u"metadata"][u"testbed"] = self._testbed
479 self._msg_type = None
481 def _get_vpp_version(self, msg):
482 """Called when extraction of VPP version is required.
484 :param msg: Message to process.
# Guard mirrors the three alternatives in REGEX_VERSION_VPP, so any message
# passing the guard is expected to match the regex.
489 if msg.message.count(u"return STDOUT Version:") or \
490 msg.message.count(u"VPP Version:") or \
491 msg.message.count(u"VPP version:"):
492 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
# Store the version in metadata and clear the parser state so subsequent
# messages are not re-scanned for the version.
494 self._data[u"metadata"][u"version"] = self._version
495 self._msg_type = None
497 def _get_dpdk_version(self, msg):
498 """Called when extraction of DPDK version is required.
500 :param msg: Message to process.
# NOTE(review): this guard only accepts "DPDK Version:" (capital V), while
# REGEX_VERSION_DPDK also accepts lowercase "DPDK version:" — messages using
# the lowercase form can never reach the extraction below. Compare with
# _get_vpp_version, which guards on all regex alternatives; likely this
# should too. Confirm which form the DPDK tests actually emit.
505 if msg.message.count(u"DPDK Version:"):
507 self._version = str(re.search(
508 self.REGEX_VERSION_DPDK, msg.message).group(2))
509 self._data[u"metadata"][u"version"] = self._version
513 self._msg_type = None
515 def _get_timestamp(self, msg):
516 """Called when extraction of timestamp is required.
518 :param msg: Message to process.
# Keep only the first 14 characters ("YYYYMMDD HH:MM") — this matches the
# "%Y%m%d %H:%M" format parsed later by dt.strptime in the time-period
# filtering code.
523 self._timestamp = msg.timestamp[:14]
524 self._data[u"metadata"][u"generated"] = self._timestamp
525 self._msg_type = None
527 def _get_vat_history(self, msg):
528 """Called when extraction of VAT command history is required.
530 TODO: Remove when not needed.
532 :param msg: Message to process.
536 if msg.message.count(u"VAT command history:"):
# One message per DUT; the counter (reset in start_teardown_kw) numbers them.
537 self._conf_history_lookup_nr += 1
538 if self._conf_history_lookup_nr == 1:
539 self._data[u"tests"][self._test_id][u"conf-history"] = str()
541 self._msg_type = None
# Strip the leading "<ip> VAT command history:" prefix. NOTE(review): the
# dots in the IP part are unescaped (match any char), and this method is a
# near-duplicate of _get_papi_history — a shared helper would avoid drift.
542 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
543 r"VAT command history:", u"",
544 msg.message, count=1).replace(u'\n', u' |br| ').\
547 self._data[u"tests"][self._test_id][u"conf-history"] += (
548 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
551 def _get_papi_history(self, msg):
552 """Called when extraction of PAPI command history is required.
554 :param msg: Message to process.
554 :param msg: Message to process.
572 def _get_show_run(self, msg):
573 """Called when extraction of VPP operational data (output of CLI command
574 Show Runtime) is required.
576 :param msg: Message to process.
# Only messages containing "stats runtime" carry the data we want.
581 if not msg.message.count(u"stats runtime"):
# Only the first capture per test is processed (counter bumped per keyword).
585 if self._sh_run_counter > 1:
588 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
589 self._data[u"tests"][self._test_id][u"show-run"] = dict()
# Host IP and PAPI socket are parsed from the "(ip - socket)" suffix; both
# lookups tolerate a non-matching message via the except clauses below.
591 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
595 host = groups.group(1)
596 except (AttributeError, IndexError):
599 sock = groups.group(2)
600 except (AttributeError, IndexError):
# NOTE(review): this converts a Python-repr payload to JSON by blanket
# string replacement (drops ALL spaces, rewrites quote characters, strips
# b/u string prefixes). It is fragile: any value legitimately containing a
# space or quote would corrupt the JSON. Works for the current "show
# runtime" payload shape — confirm before reuse.
603 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
604 replace(u"'", u'"').replace(u'b"', u'"').
605 replace(u'u"', u'"').split(u":", 1)[1])
# Thread count inferred from the first node's per-thread clocks list.
608 threads_nr = len(runtime[0][u"clocks"])
609 except (IndexError, KeyError):
# DUT key is derived from how many DUTs were already recorded for this test.
612 dut = u"DUT{nr}".format(
613 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
618 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
622 for idx in range(threads_nr):
# Normalize clocks to per-vector, else per-call, else per-suspend — first
# non-zero denominator wins.
623 if item[u"vectors"][idx] > 0:
624 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
625 elif item[u"calls"][idx] > 0:
626 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
627 elif item[u"suspends"][idx] > 0:
628 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
632 if item[u"calls"][idx] > 0:
633 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
# Skip nodes with zero activity on this thread.
637 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
638 int(item[u"suspends"][idx]):
639 oper[u"threads"][idx].append([
642 item[u"vectors"][idx],
643 item[u"suspends"][idx],
# NOTE(review): copy.copy is shallow — safe only because `oper` appears to
# be rebuilt on each call; confirm (its construction is not fully visible).
648 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
650 def _get_ndrpdr_throughput(self, msg):
651 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
654 :param msg: The test message to be parsed.
656 :returns: Parsed data as a dict and the status (PASS/FAIL).
657 :rtype: tuple(dict, str)
# -1.0 acts as the "not found / parse failed" sentinel for every bound.
661 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
662 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
665 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
667 if groups is not None:
669 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
670 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
671 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
672 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
674 except (IndexError, ValueError):
# NOTE(review): `status` is assigned on lines not visible in this listing —
# presumably FAIL by default and PASS after successful parsing; confirm.
677 return throughput, status
679 def _get_plr_throughput(self, msg):
680 """Get PLRsearch lower bound and PLRsearch upper bound from the test
683 :param msg: The test message to be parsed.
685 :returns: Parsed data as a dict and the status (PASS/FAIL).
686 :rtype: tuple(dict, str)
694 groups = re.search(self.REGEX_PLR_RATE, msg)
696 if groups is not None:
698 throughput[u"LOWER"] = float(groups.group(1))
699 throughput[u"UPPER"] = float(groups.group(2))
701 except (IndexError, ValueError):
# NOTE(review): `throughput` defaults and the `status` assignment are on
# lines not visible in this listing — presumably mirrors
# _get_ndrpdr_throughput (-1.0 sentinels, FAIL→PASS); confirm.
704 return throughput, status
706 def _get_ndrpdr_latency(self, msg):
707 """Get LATENCY from the test message.
709 :param msg: The test message to be parsed.
711 :returns: Parsed data as a dict and the status (PASS/FAIL).
712 :rtype: tuple(dict, str)
722 u"direction1": copy.copy(latency_default),
723 u"direction2": copy.copy(latency_default)
726 u"direction1": copy.copy(latency_default),
727 u"direction2": copy.copy(latency_default)
730 u"direction1": copy.copy(latency_default),
731 u"direction2": copy.copy(latency_default)
734 u"direction1": copy.copy(latency_default),
735 u"direction2": copy.copy(latency_default)
738 u"direction1": copy.copy(latency_default),
739 u"direction2": copy.copy(latency_default)
742 u"direction1": copy.copy(latency_default),
743 u"direction2": copy.copy(latency_default)
747 # TODO: Rewrite when long and base are not needed
748 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
750 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
752 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
754 return latency, u"FAIL"
756 def process_latency(in_str):
757 """Return object with parsed latency values.
759 TODO: Define class for the return type.
761 :param in_str: Input string, min/avg/max/hdrh format.
763 :returns: Dict with corresponding keys, except hdrh float values.
765 :throws IndexError: If in_str does not have enough substrings.
766 :throws ValueError: If a substring does not convert to float.
768 in_list = in_str.split('/', 3)
771 u"min": float(in_list[0]),
772 u"avg": float(in_list[1]),
773 u"max": float(in_list[2]),
777 if len(in_list) == 4:
778 rval[u"hdrh"] = str(in_list[3])
783 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
784 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
785 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
786 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
787 if groups.lastindex == 4:
788 return latency, u"PASS"
789 except (IndexError, ValueError):
793 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
794 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
795 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
796 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
797 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
798 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
799 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
800 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
801 if groups.lastindex == 12:
802 return latency, u"PASS"
803 except (IndexError, ValueError):
806 # TODO: Remove when not needed
807 latency[u"NDR10"] = {
808 u"direction1": copy.copy(latency_default),
809 u"direction2": copy.copy(latency_default)
811 latency[u"NDR50"] = {
812 u"direction1": copy.copy(latency_default),
813 u"direction2": copy.copy(latency_default)
815 latency[u"NDR90"] = {
816 u"direction1": copy.copy(latency_default),
817 u"direction2": copy.copy(latency_default)
820 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
821 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
822 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
823 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
824 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
825 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
826 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
827 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
828 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
829 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
830 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
831 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
832 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
833 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
834 return latency, u"PASS"
835 except (IndexError, ValueError):
838 return latency, u"FAIL"
840 def visit_suite(self, suite):
841 """Implements traversing through the suite and its direct children.
843 :param suite: Suite to process.
847 if self.start_suite(suite) is not False:
848 suite.suites.visit(self)
849 suite.tests.visit(self)
850 self.end_suite(suite)
852 def start_suite(self, suite):
853 """Called when suite starts.
855 :param suite: Suite to process.
861 parent_name = suite.parent.name
862 except AttributeError:
865 doc_str = suite.doc.\
866 replace(u'"', u"'").\
867 replace(u'\n', u' ').\
868 replace(u'\r', u'').\
869 replace(u'*[', u' |br| *[').\
870 replace(u"*", u"**").\
871 replace(u' |br| *[', u'*[', 1)
873 self._data[u"suites"][suite.longname.lower().
875 replace(u" ", u"_")] = {
876 u"name": suite.name.lower(),
878 u"parent": parent_name,
879 u"level": len(suite.longname.split(u"."))
882 suite.keywords.visit(self)
884 def end_suite(self, suite):
885 """Called when suite ends.
887 :param suite: Suite to process.
892 def visit_test(self, test):
893 """Implements traversing through the test.
895 :param test: Test to process.
899 if self.start_test(test) is not False:
900 test.keywords.visit(self)
903 def start_test(self, test):
904 """Called when test starts.
906 :param test: Test to process.
911 self._sh_run_counter = 0
913 longname_orig = test.longname.lower()
915 # Check the ignore list
916 if longname_orig in self._ignore:
919 tags = [str(tag) for tag in test.tags]
922 # Change the TC long name and name if defined in the mapping table
923 longname = self._mapping.get(longname_orig, None)
924 if longname is not None:
925 name = longname.split(u'.')[-1]
927 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
931 longname = longname_orig
932 name = test.name.lower()
934 # Remove TC number from the TC long name (backward compatibility):
935 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
936 # Remove TC number from the TC name (not needed):
937 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
939 test_result[u"parent"] = test.parent.name.lower()
940 test_result[u"tags"] = tags
941 test_result["doc"] = test.doc.\
942 replace(u'"', u"'").\
943 replace(u'\n', u' ').\
944 replace(u'\r', u'').\
945 replace(u'[', u' |br| [').\
946 replace(u' |br| [', u'[', 1)
947 test_result[u"msg"] = self._get_data_from_perf_test_msg(test.message).\
948 replace(u'\n', u' |br| ').\
949 replace(u'\r', u'').\
951 test_result[u"type"] = u"FUNC"
952 test_result[u"status"] = test.status
954 if u"PERFTEST" in tags:
955 # Replace info about cores (e.g. -1c-) with the info about threads
956 # and cores (e.g. -1t1c-) in the long test case names and in the
957 # test case names if necessary.
958 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
962 for tag in test_result[u"tags"]:
963 groups = re.search(self.REGEX_TC_TAG, tag)
969 self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
970 f"-{tag_tc.lower()}-",
973 test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
974 f"-{tag_tc.lower()}-",
978 test_result[u"status"] = u"FAIL"
979 self._data[u"tests"][self._test_id] = test_result
981 f"The test {self._test_id} has no or more than one "
982 f"multi-threading tags.\n"
983 f"Tags: {test_result[u'tags']}"
987 if test.status == u"PASS":
988 if u"NDRPDR" in tags:
989 test_result[u"type"] = u"NDRPDR"
990 test_result[u"throughput"], test_result[u"status"] = \
991 self._get_ndrpdr_throughput(test.message)
992 test_result[u"latency"], test_result[u"status"] = \
993 self._get_ndrpdr_latency(test.message)
994 elif u"SOAK" in tags:
995 test_result[u"type"] = u"SOAK"
996 test_result[u"throughput"], test_result[u"status"] = \
997 self._get_plr_throughput(test.message)
999 test_result[u"type"] = u"TCP"
1000 groups = re.search(self.REGEX_TCP, test.message)
1001 test_result[u"result"] = int(groups.group(2))
1002 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1004 test_result[u"type"] = u"MRR"
1006 test_result[u"type"] = u"BMRR"
1008 test_result[u"result"] = dict()
1009 groups = re.search(self.REGEX_BMRR, test.message)
1010 if groups is not None:
1011 items_str = groups.group(1)
1012 items_float = [float(item.strip()) for item
1013 in items_str.split(",")]
1014 # Use whole list in CSIT-1180.
1015 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1016 test_result[u"result"][u"receive-rate"] = stats.avg
1018 groups = re.search(self.REGEX_MRR, test.message)
1019 test_result[u"result"][u"receive-rate"] = \
1020 float(groups.group(3)) / float(groups.group(1))
1021 elif u"RECONF" in tags:
1022 test_result[u"type"] = u"RECONF"
1023 test_result[u"result"] = None
1025 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1026 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1027 test_result[u"result"] = {
1028 u"loss": int(grps_loss.group(1)),
1029 u"time": float(grps_time.group(1))
1031 except (AttributeError, IndexError, ValueError, TypeError):
1032 test_result[u"status"] = u"FAIL"
1034 test_result[u"status"] = u"FAIL"
1035 self._data[u"tests"][self._test_id] = test_result
1038 self._data[u"tests"][self._test_id] = test_result
1040 def end_test(self, test):
1041 """Called when test ends.
1043 :param test: Test to process.
1048 def visit_keyword(self, keyword):
1049 """Implements traversing through the keyword and its child keywords.
1051 :param keyword: Keyword to process.
1052 :type keyword: Keyword
1055 if self.start_keyword(keyword) is not False:
1056 self.end_keyword(keyword)
1058 def start_keyword(self, keyword):
1059 """Called when keyword starts. Default implementation does nothing.
1061 :param keyword: Keyword to process.
1062 :type keyword: Keyword
1066 if keyword.type == u"setup":
1067 self.visit_setup_kw(keyword)
1068 elif keyword.type == u"teardown":
1069 self.visit_teardown_kw(keyword)
1071 self.visit_test_kw(keyword)
1072 except AttributeError:
1075 def end_keyword(self, keyword):
1076 """Called when keyword ends. Default implementation does nothing.
1078 :param keyword: Keyword to process.
1079 :type keyword: Keyword
1083 def visit_test_kw(self, test_kw):
1084 """Implements traversing through the test keyword and its child
1087 :param test_kw: Keyword to process.
1088 :type test_kw: Keyword
1091 for keyword in test_kw.keywords:
1092 if self.start_test_kw(keyword) is not False:
1093 self.visit_test_kw(keyword)
1094 self.end_test_kw(keyword)
1096 def start_test_kw(self, test_kw):
1097 """Called when test keyword starts. Default implementation does
1100 :param test_kw: Keyword to process.
1101 :type test_kw: Keyword
# Route messages of the matching keywords to the right parser: the Show
# Runtime keywords feed _get_show_run (counter limits processing to the
# first capture per test), the DPDK install keyword feeds version parsing
# only while the version is still unknown.
1104 if test_kw.name.count(u"Show Runtime On All Duts") or \
1105 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1106 self._msg_type = u"test-show-runtime"
1107 self._sh_run_counter += 1
1108 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1109 self._msg_type = u"dpdk-version"
1112 test_kw.messages.visit(self)
1114 def end_test_kw(self, test_kw):
1115 """Called when keyword ends. Default implementation does nothing.
1117 :param test_kw: Keyword to process.
1118 :type test_kw: Keyword
1122 def visit_setup_kw(self, setup_kw):
1123 """Implements traversing through the setup keyword and its child
1126 :param setup_kw: Keyword to process.
1127 :type setup_kw: Keyword
1130 for keyword in setup_kw.keywords:
1131 if self.start_setup_kw(keyword) is not False:
1132 self.visit_setup_kw(keyword)
1133 self.end_setup_kw(keyword)
1135 def start_setup_kw(self, setup_kw):
1136 """Called when setup keyword starts. Default implementation does
1139 :param setup_kw: Keyword to process.
1140 :type setup_kw: Keyword
# Each branch arms the corresponding message parser, but only while the
# value is still unknown — first occurrence wins.
1143 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1144 and not self._version:
1145 self._msg_type = u"vpp-version"
1146 elif setup_kw.name.count(u"Set Global Variable") \
1147 and not self._timestamp:
1148 self._msg_type = u"timestamp"
1149 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1150 self._msg_type = u"testbed"
1153 setup_kw.messages.visit(self)
1155 def end_setup_kw(self, setup_kw):
1156 """Called when keyword ends. Default implementation does nothing.
1158 :param setup_kw: Keyword to process.
1159 :type setup_kw: Keyword
1163 def visit_teardown_kw(self, teardown_kw):
1164 """Implements traversing through the teardown keyword and its child
1167 :param teardown_kw: Keyword to process.
1168 :type teardown_kw: Keyword
1171 for keyword in teardown_kw.keywords:
1172 if self.start_teardown_kw(keyword) is not False:
1173 self.visit_teardown_kw(keyword)
1174 self.end_teardown_kw(keyword)
1176 def start_teardown_kw(self, teardown_kw):
1177 """Called when teardown keyword starts.
1179 :param teardown_kw: Keyword to process.
1180 :type teardown_kw: Keyword
# Both branches reset the per-test DUT history counter before visiting the
# messages, so _get_*_history numbers DUTs from 1 for each teardown.
1184 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1185 # TODO: Remove when not needed:
1186 self._conf_history_lookup_nr = 0
1187 self._msg_type = u"teardown-vat-history"
1188 teardown_kw.messages.visit(self)
1189 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1190 self._conf_history_lookup_nr = 0
1191 self._msg_type = u"teardown-papi-history"
1192 teardown_kw.messages.visit(self)
1194 def end_teardown_kw(self, teardown_kw):
1195 """Called when keyword ends. Default implementation does nothing.
1197 :param teardown_kw: Keyword to process.
1198 :type teardown_kw: Keyword
1202 def visit_message(self, msg):
1203 """Implements visiting the message.
1205 :param msg: Message to process.
1209 if self.start_message(msg) is not False:
1210 self.end_message(msg)
1212 def start_message(self, msg):
1213 """Called when message starts. Get required information from messages:
1216 :param msg: Message to process.
1222 self.parse_msg[self._msg_type](msg)
1224 def end_message(self, msg):
1225 """Called when message ends. Default implementation does nothing.
1227 :param msg: Message to process.
1236 The data is extracted from output.xml files generated by Jenkins jobs and
1237 stored in pandas' DataFrames.
1243 (as described in ExecutionChecker documentation)
1245 (as described in ExecutionChecker documentation)
1247 (as described in ExecutionChecker documentation)
1250 def __init__(self, spec):
1253 :param spec: Specification.
1254 :type spec: Specification
1261 self._input_data = pd.Series()
1265 """Getter - Input data.
1267 :returns: Input data
1268 :rtype: pandas.Series
1270 return self._input_data
1272 def metadata(self, job, build):
1273 """Getter - metadata
1275 :param job: Job which metadata we want.
1276 :param build: Build which metadata we want.
1280 :rtype: pandas.Series
1283 return self.data[job][build][u"metadata"]
1285 def suites(self, job, build):
1288 :param job: Job which suites we want.
1289 :param build: Build which suites we want.
1293 :rtype: pandas.Series
1296 return self.data[job][str(build)][u"suites"]
1298 def tests(self, job, build):
1301 :param job: Job which tests we want.
1302 :param build: Build which tests we want.
1306 :rtype: pandas.Series
1309 return self.data[job][build][u"tests"]
1311 def _parse_tests(self, job, build, log):
1312 """Process data from robot output.xml file and return JSON structured
1315 :param job: The name of job which build output data will be processed.
1316 :param build: The build which output data will be processed.
1317 :param log: List of log messages.
1320 :type log: list of tuples (severity, msg)
1321 :returns: JSON data structure.
1330 with open(build[u"file-name"], u'r') as data_file:
1332 result = ExecutionResult(data_file)
1333 except errors.DataError as err:
1335 (u"ERROR", f"Error occurred while parsing output.xml: "
1339 checker = ExecutionChecker(metadata, self._cfg.mapping,
1341 result.visit(checker)
# Download one build's input file, parse it, delete the file, optionally
# discard data older than the configured time period, flush the collected
# (severity, message) log tuples to the logging module, and return a result
# dict for the caller to merge into self._input_data.
# NOTE(review): excerpt is truncated — many interior lines (state
# transitions, 'now' assignment, removal of outdated data) are elided and
# each line carries a stray original-file number. Code byte-identical.
1345 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1346 """Download and parse the input data file.
1348 :param pid: PID of the process executing this method.
1349 :param job: Name of the Jenkins job which generated the processed input
1351 :param build: Information about the Jenkins build which generated the
1352 processed input file.
1353 :param repeat: Repeat the download specified number of times if not
1364 (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
# Download step; 'success' gates the parse below (branch lines elided).
1372 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1380 f"It is not possible to download the input data file from the "
1381 f"job {job}, build {build[u'build']}, or it is damaged. "
1387 f"  Processing data from the build {build[u'build']} ...")
1389 data = self._parse_tests(job, build, logs)
1393 f"Input data file from the job {job}, build "
1394 f"{build[u'build']} is damaged. Skipped.")
1397 state = u"processed"
# Best-effort cleanup of the downloaded file; failure is logged, not raised.
1400 remove(build[u"file-name"])
1401 except OSError as err:
1403 ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1407 # If the time-period is defined in the specification file, remove all
1408 # files which are outside the time period.
1409 timeperiod = self._cfg.input.get(u"time-period", None)
1410 if timeperiod and data:
# time-period is interpreted as a number of days (timedelta(int(...))).
1412 timeperiod = timedelta(int(timeperiod))
1413 metadata = data.get(u"metadata", None)
1415 generated = metadata.get(u"generated", None)
# Timestamp format must match what ExecutionChecker wrote into metadata.
1417 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1418 if (now - generated) > timeperiod:
1419 # Remove the data and the file:
1424 f"  The build {job}/{build[u'build']} is "
1425 f"outdated, will be removed.")
1427 logs.append((u"INFO", u"  Done."))
# Replay the accumulated log tuples through the real logging module; this
# batching presumably exists because the method may run in a worker process.
1429 for level, line in logs:
1430 if level == u"INFO":
1432 elif level == u"ERROR":
1434 elif level == u"DEBUG":
1436 elif level == u"CRITICAL":
1437 logging.critical(line)
1438 elif level == u"WARNING":
1439 logging.warning(line)
1441 return {u"data": data, u"state": state, u"job": job, u"build": build}
# Driver: for every job/build in the specification, download+parse the
# build (via _download_and_parse_build) and store the result as nested
# pandas.Series under self._input_data[job][str(build_nr)].
# NOTE(review): excerpt is truncated (closing parentheses of the pd.Series
# calls and some lines elided); lines carry stray original-file numbers.
# Code kept byte-identical; comments only.
1443 def download_and_parse_data(self, repeat=1):
1444 """Download the input data files, parse input data from input files and
1445 store in pandas' Series.
1447 :param repeat: Repeat the download specified number of times if not
1452 logging.info(u"Downloading and parsing input files ...")
1454 for job, builds in self._cfg.builds.items():
1455 for build in builds:
1457 result = self._download_and_parse_build(job, build, repeat)
1458 build_nr = result[u"build"][u"build"]
1461 data = result[u"data"]
# Wrap each of the three top-level dicts (metadata/suites/tests) in its
# own Series, preserving key order via explicit index lists.
1462 build_data = pd.Series({
1463 u"metadata": pd.Series(
1464 list(data[u"metadata"].values()),
1465 index=list(data[u"metadata"].keys())
1467 u"suites": pd.Series(
1468 list(data[u"suites"].values()),
1469 index=list(data[u"suites"].keys())
1471 u"tests": pd.Series(
1472 list(data[u"tests"].values()),
1473 index=list(data[u"tests"].keys())
# Lazily create the per-job Series; builds are keyed by str(build_nr).
1477 if self._input_data.get(job, None) is None:
1478 self._input_data[job] = pd.Series()
1479 self._input_data[job][str(build_nr)] = build_data
# Record where the file lived and the processing state in the spec config.
1481 self._cfg.set_input_file_name(
1482 job, build_nr, result[u"build"][u"file-name"])
1484 self._cfg.set_input_state(job, build_nr, result[u"state"])
# ru_maxrss / 1000: presumably converting KB to MB (Linux reports KB);
# TODO confirm — on macOS ru_maxrss is in bytes.
1487 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1488 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1490 logging.info(u"Done.")
# Helper (presumably a @staticmethod — decorator line elided): find the
# index of the closing apostrophe of the next 'tag' occurrence in a filter
# string, searching from 'start'.
# NOTE(review): the except ValueError branch (returning None when no
# further tag exists) is elided from this excerpt. Code byte-identical.
1493 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1494 """Return the index of character in the string which is the end of tag.
1496 :param tag_filter: The string where the end of tag is being searched.
1497 :param start: The index where the searching is stated.
1498 :param closer: The character which is the tag closer.
1499 :type tag_filter: str
1502 :returns: The index of the tag closer.
# First index() finds the opening quote, second finds the matching closer;
# str.index raises ValueError when absent (handler elided here).
1507 idx_opener = tag_filter.index(closer, start)
1508 return tag_filter.index(closer, idx_opener + 1)
# Helper (presumably a @staticmethod — decorator line elided): rewrite a
# tag filter like "'A' and 'B'" into an eval-able expression by inserting
# " in tags" after each quoted tag; consumed by filter_data() below.
# NOTE(review): loop header, termination test and return are elided from
# this excerpt. Code kept byte-identical; comments only.
1513 def _condition(tag_filter):
1514 """Create a conditional statement from the given tag filter.
1516 :param tag_filter: Filter based on tags from the element specification.
1517 :type tag_filter: str
1518 :returns: Conditional statement which can be evaluated.
# Advance to the closing quote of the next tag (None terminates — elided).
1524 index = InputData._end_of_tag(tag_filter, index)
# Splice " in tags" immediately after the tag's closing quote.
1528 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
# Select tests/suites from self.data by evaluating the element's tag
# filter against each item's tags, copying only the requested parameters
# into a new nested pandas.Series (job -> build -> item -> param).
# NOTE(review): excerpt is truncated — several branches (suites handling,
# "all"/"template" condition value, try:, returns) are elided and lines
# carry stray original-file numbers. Code kept byte-identical.
1530 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1531 continue_on_error=False):
1532 """Filter required data from the given jobs and builds.
1534 The output data structure is:
1538 - test (or suite) 1 ID:
1544 - test (or suite) n ID:
1551 :param element: Element which will use the filtered data.
1552 :param params: Parameters which will be included in the output. If None,
1553 all parameters are included.
1554 :param data: If not None, this data is used instead of data specified
1556 :param data_set: The set of data to be filtered: tests, suites,
1558 :param continue_on_error: Continue if there is error while reading the
1559 data. The Item will be empty then
1560 :type element: pandas.Series
1564 :type continue_on_error: bool
1565 :returns: Filtered data.
1566 :rtype pandas.Series
1570 if data_set == "suites":
1572 elif element[u"filter"] in (u"all", u"template"):
# Build the eval-able condition string from the spec's tag filter.
1575 cond = InputData._condition(element[u"filter"])
1576 logging.debug(f"  Filter: {cond}")
1578 logging.error(u"  No filter defined.")
1582 params = element.get(u"parameters", None)
# "type" is always carried along with explicitly requested parameters.
1584 params.append(u"type")
1586 data_to_filter = data if data else element[u"data"]
1589 for job, builds in data_to_filter.items():
1590 data[job] = pd.Series()
1591 for build in builds:
1592 data[job][str(build)] = pd.Series()
1595 self.data[job][str(build)][data_set].items())
1597 if continue_on_error:
1601 for test_id, test_data in data_dict.items():
# NOTE(review): eval() of the spec-derived filter string — acceptable only
# because the specification is trusted input; do not feed it user data.
1602 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1603 data[job][str(build)][test_id] = pd.Series()
# params is None => copy every parameter of the matching item.
1605 for param, val in test_data.items():
1606 data[job][str(build)][test_id][param] = val
1608 for param in params:
1610 data[job][str(build)][test_id][param] =\
1613 data[job][str(build)][test_id][param] =\
1617 except (KeyError, IndexError, ValueError) as err:
1619 f"Missing mandatory parameter in the element specification: "
1623 except AttributeError as err:
1624 logging.error(repr(err))
# A SyntaxError from eval() means the filter string itself is malformed.
1626 except SyntaxError as err:
1628 f"The filter {cond} is not correct. Check if all tags are "
1629 f"enclosed by apostrophes.\n{repr(err)}"
# Like filter_data(), but selects items whose ID matches any regex in the
# element's "include" list instead of evaluating a tag filter.
# NOTE(review): excerpt is truncated — early return, try: lines, the
# parameter-copy right-hand sides and final returns are elided; lines
# carry stray original-file numbers. Code kept byte-identical.
1633 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1634 continue_on_error=False):
1635 """Filter required data from the given jobs and builds.
1637 The output data structure is:
1641 - test (or suite) 1 ID:
1647 - test (or suite) n ID:
1654 :param element: Element which will use the filtered data.
1655 :param params: Parameters which will be included in the output. If None,
1656 all parameters are included.
1657 :param data_set: The set of data to be filtered: tests, suites,
1659 :param continue_on_error: Continue if there is error while reading the
1660 data. The Item will be empty then
1661 :type element: pandas.Series
1664 :type continue_on_error: bool
1665 :returns: Filtered data.
1666 :rtype pandas.Series
1669 include = element.get(u"include", None)
1671 logging.warning(u"No tests to include, skipping the element.")
1675 params = element.get(u"parameters", None)
# "type" is always carried along with explicitly requested parameters.
1677 params.append(u"type")
1681 for job, builds in element[u"data"].items():
1682 data[job] = pd.Series()
1683 for build in builds:
1684 data[job][str(build)] = pd.Series()
1685 for test in include:
# Patterns are matched case-insensitively by lower-casing both sides.
1687 reg_ex = re.compile(str(test).lower())
1688 for test_id in self.data[job][
1689 str(build)][data_set].keys():
# re.match anchors at the start of the ID (not full-string, not search).
1690 if re.match(reg_ex, str(test_id).lower()):
1691 test_data = self.data[job][
1692 str(build)][data_set][test_id]
1693 data[job][str(build)][test_id] = pd.Series()
1695 for param, val in test_data.items():
1696 data[job][str(build)][test_id]\
1699 for param in params:
1701 data[job][str(build)][
# Missing parameters are recorded as the literal string "No Data".
1705 data[job][str(build)][
1706 test_id][param] = u"No Data"
1707 except KeyError as err:
1708 logging.error(repr(err))
1709 if continue_on_error:
1714 except (KeyError, IndexError, ValueError) as err:
1716 f"Missing mandatory parameter in the element "
1717 f"specification: {repr(err)}"
1720 except AttributeError as err:
1721 logging.error(repr(err))
# Flatten a job -> build -> item structure into a single item-keyed Series
# (presumably a @staticmethod — decorator line elided). Later builds
# overwrite earlier ones when item IDs collide, since assignment is last-
# writer-wins.
# NOTE(review): excerpt is truncated (the return statement is elided);
# lines carry stray original-file numbers. Code kept byte-identical.
1725 def merge_data(data):
1726 """Merge data from more jobs and builds to a simple data structure.
1728 The output data structure is:
1730 - test (suite) 1 ID:
1736 - test (suite) n ID:
1739 :param data: Data to merge.
1740 :type data: pandas.Series
1741 :returns: Merged data.
1742 :rtype: pandas.Series
1745 logging.info(u"  Merging data ...")
1747 merged_data = pd.Series()
# .values on a Series iterates the stored objects (here: nested Series).
1748 for builds in data.values:
1749 for item in builds.values:
1750 for item_id, item_data in item.items():
1751 merged_data[item_id] = item_data
# Debug/console helper: print every test's "show-run" operational data as
# one PrettyTable per DUT thread, with a per-thread average vector size.
# NOTE(review): excerpt is truncated — tbl_hdr construction, several
# continue statements, the row/avg accumulation loop and the else-branch
# are elided; lines carry stray original-file numbers. Code byte-identical.
1755 def print_all_oper_data(self):
1756 """Print all operational data to console.
1764 u"Cycles per Packet",
1765 u"Average Vector Size"
1768 for job in self._input_data.values:
1769 for build in job.values:
1770 for test_id, test_data in build[u"tests"].items():
# Tests without captured show-run output are skipped.
1772 if test_data.get(u"show-run", None) is None:
1774 for dut_name, data in test_data[u"show-run"].items():
1775 if data.get(u"threads", None) is None:
1777 print(f"Host IP: {data.get(u'host', '')}, "
1778 f"Socket: {data.get(u'socket', '')}")
1779 for thread_nr, thread in data[u"threads"].items():
1780 txt_table = prettytable.PrettyTable(tbl_hdr)
1783 txt_table.add_row(row)
# Guard against division by zero for threads with no rows.
1785 if len(thread) == 0:
1788 avg = f", Average Vector Size per Node: " \
1789 f"{(avg / len(thread)):.2f}"
# Thread 0 is reported as "main", the rest as "worker_<n>".
1790 th_name = u"main" if thread_nr == 0 \
1791 else f"worker_{thread_nr}"
1792 print(f"{dut_name}, {th_name}{avg}")
1793 txt_table.float_format = u".2"
1794 txt_table.align = u"r"
1795 txt_table.align[u"Name"] = u"l"
1796 print(f"{txt_table.get_string()}\n")