1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
38 from robot.api import ExecutionResult, ResultVisitor
39 from robot import errors
41 from resources.libraries.python import jumpavg
42 from input_data_files import download_and_unzip_data_file
45 # Separator used in file names
# NOTE(review): this file is a NON-CONTIGUOUS excerpt — each line carries its
# original line number and the numbers skip; interior statements of most
# definitions are missing. Comments below describe only what is visible here.
49 class ExecutionChecker(ResultVisitor):
50 """Class to traverse through the test suite structure.
52 The functionality implemented in this class generates a json structure:
58 "generated": "Timestamp",
59 "version": "SUT version",
60 "job": "Jenkins job name",
61 "build": "Information about the build"
64 "Suite long name 1": {
66 "doc": "Suite 1 documentation",
67 "parent": "Suite 1 parent",
68 "level": "Level of the suite in the suite hierarchy"
70 "Suite long name N": {
72 "doc": "Suite N documentation",
73 "parent": "Suite 2 parent",
74 "level": "Level of the suite in the suite hierarchy"
81 "parent": "Name of the parent of the test",
82 "doc": "Test documentation",
83 "msg": "Test message",
84 "conf-history": "DUT1 and DUT2 VAT History",
85 "show-run": "Show Run",
86 "tags": ["tag 1", "tag 2", "tag n"],
88 "status": "PASS" | "FAIL",
134 "parent": "Name of the parent of the test",
135 "doc": "Test documentation",
136 "msg": "Test message",
137 "tags": ["tag 1", "tag 2", "tag n"],
139 "status": "PASS" | "FAIL",
146 "parent": "Name of the parent of the test",
147 "doc": "Test documentation",
148 "msg": "Test message",
149 "tags": ["tag 1", "tag 2", "tag n"],
150 "type": "MRR" | "BMRR",
151 "status": "PASS" | "FAIL",
153 "receive-rate": float,
154 # Average of a list, computed using AvgStdevStats.
155 # In CSIT-1180, replace with List[float].
169 "metadata": { # Optional
170 "version": "VPP version",
171 "job": "Jenkins job name",
172 "build": "Information about the build"
176 "doc": "Suite 1 documentation",
177 "parent": "Suite 1 parent",
178 "level": "Level of the suite in the suite hierarchy"
181 "doc": "Suite N documentation",
182 "parent": "Suite 2 parent",
183 "level": "Level of the suite in the suite hierarchy"
189 "parent": "Name of the parent of the test",
190 "doc": "Test documentation"
191 "msg": "Test message"
192 "tags": ["tag 1", "tag 2", "tag n"],
193 "conf-history": "DUT1 and DUT2 VAT History"
194 "show-run": "Show Run"
195 "status": "PASS" | "FAIL"
203 .. note:: ID is the lowercase full path to the test.
# Regexes below parse test-result text out of Robot test/keyword messages.
# PLRsearch (soak test) lower/upper bound throughput values.
206 REGEX_PLR_RATE = re.compile(
207 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
208 r'PLRsearch upper bound::?\s(\d+.\d+)'
# NDR/PDR throughput: 4 groups — NDR lower/upper, PDR lower/upper.
210 REGEX_NDRPDR_RATE = re.compile(
211 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
212 r'NDR_UPPER:\s(\d+.\d+).*\n'
213 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
214 r'PDR_UPPER:\s(\d+.\d+)'
# Full NDR/PDR message: 20 groups (values, units and latency pairs);
# group count is checked in _get_data_from_perf_test_msg.
216 REGEX_PERF_MSG_INFO = re.compile(
217 r'NDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
218 r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
219 r'NDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
220 r'PDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
221 r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
222 r'PDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*)'
224 # TODO: Remove when not needed
225 REGEX_NDRPDR_LAT_BASE = re.compile(
226 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
227 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
# Latency variants: BASE = 4 groups, LAT = 12 groups, LONG = 18 groups;
# _get_ndrpdr_latency tries LONG, then LAT, then BASE.
229 REGEX_NDRPDR_LAT = re.compile(
230 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
231 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
232 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
233 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
234 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
235 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
237 # TODO: Remove when not needed
238 REGEX_NDRPDR_LAT_LONG = re.compile(
239 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
240 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
241 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
242 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
246 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
247 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
# SUT version strings; group(2) is the version text itself.
249 REGEX_VERSION_VPP = re.compile(
250 r"(return STDOUT Version:\s*|"
251 r"VPP Version:\s*|VPP version:\s*)(.*)"
253 REGEX_VERSION_DPDK = re.compile(
254 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
256 REGEX_TCP = re.compile(
257 r'Total\s(rps|cps|throughput):\s(\d*).*$'
259 REGEX_MRR = re.compile(
260 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
261 r'tx\s(\d*),\srx\s(\d*)'
263 REGEX_BMRR = re.compile(
264 r'Maximum Receive Rate trial results'
265 r' in packets per second: \[(.*)\]'
267 REGEX_RECONF_LOSS = re.compile(
268 r'Packets lost due to reconfig: (\d*)'
270 REGEX_RECONF_TIME = re.compile(
271 r'Implied time lost: (\d*.[\de-]*)'
# Test-case naming helpers: thread/core tags and tcNN- prefixes.
273 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
275 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
277 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
279 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
281 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
# Initialize parser state and register per-message-type parser callbacks.
# (Excerpt is non-contiguous; some attribute initializations are not visible.)
283 def __init__(self, metadata, mapping, ignore):
286 :param metadata: Key-value pairs to be included in "metadata" part of
288 :param mapping: Mapping of the old names of test cases to the new
290 :param ignore: List of TCs to be ignored.
296 # Type of message to parse out from the test messages
297 self._msg_type = None
303 self._timestamp = None
305 # Testbed. The testbed is identified by TG node IP address.
308 # Mapping of TCs long names
309 self._mapping = mapping
312 self._ignore = ignore
314 # Number of PAPI History messages found:
316 # 1 - PAPI History of DUT1
317 # 2 - PAPI History of DUT2
318 self._conf_history_lookup_nr = 0
320 # Test ID of currently processed test- the lowercase full path to the
324 # The main data structure
326 u"metadata": OrderedDict(),
327 u"suites": OrderedDict(),
328 u"tests": OrderedDict()
331 # Save the provided metadata
332 for key, val in metadata.items():
333 self._data[u"metadata"][key] = val
335 # Dictionary defining the methods used to parse different types of
# Dispatch table: start_message() calls parse_msg[self._msg_type](msg).
338 u"timestamp": self._get_timestamp,
339 u"vpp-version": self._get_vpp_version,
340 u"dpdk-version": self._get_dpdk_version,
341 # TODO: Remove when not needed:
342 u"teardown-vat-history": self._get_vat_history,
343 u"teardown-papi-history": self._get_papi_history,
344 u"test-show-runtime": self._get_show_run,
345 u"testbed": self._get_testbed
350 """Getter - Data parsed from the XML file.
352 :returns: Data parsed from the XML file.
# Reformat an NDRPDR result message into a human-readable summary with
# throughput and latency (incl. hdrh percentile latencies when decodable).
# On any parsing problem the original message is returned unchanged.
357 def _get_data_from_perf_test_msg(self, msg):
365 from message of NDRPDR performance tests.
367 :param msg: Message to be processed.
369 :returns: Processed message or original message if a problem occurs.
# REGEX_PERF_MSG_INFO must yield exactly 20 groups; otherwise fall back.
373 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
374 if not groups or groups.lastindex != 20:
379 u"ndr_low": float(groups.group(1)),
380 u"ndr_low_unit": groups.group(2),
381 u"ndr_low_b": float(groups.group(3)),
382 u"ndr_low_b_unit": groups.group(4),
383 u"ndr_lat_1": groups.group(5),
384 u"ndr_lat_2": groups.group(6),
385 u"ndr_up": float(groups.group(7)),
386 u"ndr_up_unit": groups.group(8),
387 u"ndr_up_b": float(groups.group(9)),
388 u"ndr_up_b_unit": groups.group(10),
389 u"pdr_low": float(groups.group(11)),
390 u"pdr_low_unit": groups.group(12),
391 u"pdr_low_b": float(groups.group(13)),
392 u"pdr_low_b_unit": groups.group(14),
393 u"pdr_lat_1": groups.group(15),
394 u"pdr_lat_2": groups.group(16),
395 u"pdr_up": float(groups.group(17)),
396 u"pdr_up_unit": groups.group(18),
397 u"pdr_up_b": float(groups.group(19)),
398 u"pdr_up_b_unit": groups.group(20)
400 except (AttributeError, IndexError, ValueError, KeyError):
# Nested helper: format "min/avg/max[/hdrh]" latency strings for both
# directions into a display tuple; "Not Measured." when unparseable.
403 def _process_lat(in_str_1, in_str_2):
404 """Extract min, avg, max values from latency string.
406 :param in_str_1: Latency string for one direction produced by robot
408 :param in_str_2: Latency string for second direction produced by
412 :returns: Processed latency string or original string if a problem
414 :rtype: tuple(str, str)
416 in_list_1 = in_str_1.split('/', 3)
417 if len(in_list_1) < 3:
418 return u"Not Measured.", u"Not Measured."
420 in_list_2 = in_str_2.split('/', 3)
421 if len(in_list_2) < 3:
422 return u"Not Measured.", u"Not Measured."
# 4th field is base64 hdrh data; pad with '=' so length is a multiple
# of 4 before decoding. NOTE(review): "len % 4" padding looks suspect
# (correct pad is (-len) % 4) — cannot confirm from this excerpt.
425 if len(in_list_1) == 4:
426 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
428 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
429 except hdrh.codec.HdrLengthException:
432 if len(in_list_2) == 4:
433 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
435 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
436 except hdrh.codec.HdrLengthException:
# Percentile summary only when BOTH directions decoded successfully.
439 hdr_lat = u"Not Measured."
440 if hdr_lat_1 and hdr_lat_2:
442 f"50%/90%/99%/99.9%, "
443 f"{hdr_lat_1.get_value_at_percentile(50.0)}/"
444 f"{hdr_lat_1.get_value_at_percentile(90.0)}/"
445 f"{hdr_lat_1.get_value_at_percentile(99.0)}/"
446 f"{hdr_lat_1.get_value_at_percentile(99.9)}, "
447 f"{hdr_lat_2.get_value_at_percentile(50.0)}/"
448 f"{hdr_lat_2.get_value_at_percentile(90.0)}/"
449 f"{hdr_lat_2.get_value_at_percentile(99.0)}/"
450 f"{hdr_lat_2.get_value_at_percentile(99.9)} "
456 f"{in_list_1[0]}/{in_list_1[1]}/{in_list_1[2]}, "
457 f"{in_list_2[0]}/{in_list_2[1]}/{in_list_2[2]} uSec.",
# Assemble the final multi-line summary; throughput reported in
# millions of the parsed unit (value / 1e6, "M" prefix).
462 pdr_lat = _process_lat(data[u'pdr_lat_1'], data[u'pdr_lat_2'])
463 ndr_lat = _process_lat(data[u'ndr_lat_1'], data[u'ndr_lat_2'])
465 f"NDR Throughput: {(data[u'ndr_low'] / 1e6):.2f} "
466 f"M{data[u'ndr_low_unit']}, "
467 f"{data[u'ndr_low_b']:.2f} {data[u'ndr_low_b_unit']}.\n"
468 f"One-Way Latency at NDR: {ndr_lat[0]}\n"
469 f"One-Way Latency at NDR by percentiles: {ndr_lat[1]}\n"
470 f"PDR Throughput: {(data[u'pdr_low'] / 1e6):.2f} "
471 f"M{data[u'pdr_low_unit']}, "
472 f"{data[u'pdr_low_b']:.2f} {data[u'pdr_low_b_unit']}.\n"
473 f"One-Way Latency at PDR: {pdr_lat[0]}\n"
474 f"One-Way Latency at PDR by percentiles: {pdr_lat[1]}"
476 except (AttributeError, IndexError, ValueError, KeyError):
# Extract the TG node IP from a "Setup of TG node" message and store it as
# metadata["testbed"]; resets _msg_type so later messages are ignored.
479 def _get_testbed(self, msg):
480 """Called when extraction of testbed IP is required.
481 The testbed is identified by TG node IP address.
483 :param msg: Message to process.
488 if msg.message.count(u"Setup of TG node") or \
489 msg.message.count(u"Setup of node TG host"):
490 reg_tg_ip = re.compile(
491 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
493 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
494 except (KeyError, ValueError, IndexError, AttributeError):
497 self._data[u"metadata"][u"testbed"] = self._testbed
498 self._msg_type = None
# Extract the VPP version string (REGEX_VERSION_VPP group 2, per the
# pattern) into metadata["version"]; resets _msg_type when matched.
500 def _get_vpp_version(self, msg):
501 """Called when extraction of VPP version is required.
503 :param msg: Message to process.
508 if msg.message.count(u"return STDOUT Version:") or \
509 msg.message.count(u"VPP Version:") or \
510 msg.message.count(u"VPP version:"):
511 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
513 self._data[u"metadata"][u"version"] = self._version
514 self._msg_type = None
# Same as _get_vpp_version but for "DPDK Version:" messages.
516 def _get_dpdk_version(self, msg):
517 """Called when extraction of DPDK version is required.
519 :param msg: Message to process.
524 if msg.message.count(u"DPDK Version:"):
526 self._version = str(re.search(
527 self.REGEX_VERSION_DPDK, msg.message).group(2))
528 self._data[u"metadata"][u"version"] = self._version
532 self._msg_type = None
# Store the first 14 chars of the message timestamp ("YYYYMMDD HH:MM")
# as metadata["generated"], then stop listening for timestamps.
534 def _get_timestamp(self, msg):
535 """Called when extraction of timestamp is required.
537 :param msg: Message to process.
542 self._timestamp = msg.timestamp[:14]
543 self._data[u"metadata"][u"generated"] = self._timestamp
544 self._msg_type = None
# Accumulate per-DUT VAT command history into the current test's
# "conf-history" field; DUT number comes from _conf_history_lookup_nr.
546 def _get_vat_history(self, msg):
547 """Called when extraction of VAT command history is required.
549 TODO: Remove when not needed.
551 :param msg: Message to process.
555 if msg.message.count(u"VAT command history:"):
556 self._conf_history_lookup_nr += 1
557 if self._conf_history_lookup_nr == 1:
558 self._data[u"tests"][self._test_id][u"conf-history"] = str()
560 self._msg_type = None
# Strip the "IP VAT command history:" prefix; newlines become " |br| "
# markers (reST line breaks used downstream).
561 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
562 r"VAT command history:", u"",
563 msg.message, count=1).replace(u'\n', u' |br| ').\
566 self._data[u"tests"][self._test_id][u"conf-history"] += (
567 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
# PAPI counterpart of _get_vat_history; same accumulation scheme into
# the current test's "conf-history" field.
570 def _get_papi_history(self, msg):
571 """Called when extraction of PAPI command history is required.
573 :param msg: Message to process.
577 if msg.message.count(u"PAPI command history:"):
578 self._conf_history_lookup_nr += 1
579 if self._conf_history_lookup_nr == 1:
580 self._data[u"tests"][self._test_id][u"conf-history"] = str()
582 self._msg_type = None
583 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
584 r"PAPI command history:", u"",
585 msg.message, count=1).replace(u'\n', u' |br| ').\
587 self._data[u"tests"][self._test_id][u"conf-history"] += (
588 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
# Parse "Show Runtime" output (a python-repr-ish payload inside the
# message) into per-DUT, per-thread node statistics under "show-run".
591 def _get_show_run(self, msg):
592 """Called when extraction of VPP operational data (output of CLI command
593 Show Runtime) is required.
595 :param msg: Message to process.
600 if not msg.message.count(u"stats runtime"):
603 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
604 self._data[u"tests"][self._test_id][u"show-run"] = dict()
# Host IP and socket path are parsed from "(ip - sock)" in the message.
606 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
610 host = groups.group(1)
611 except (AttributeError, IndexError):
614 sock = groups.group(2)
615 except (AttributeError, IndexError):
# Convert the repr-style payload into valid JSON (strip spaces/newlines,
# single→double quotes, drop b"/u" string prefixes), then json-load the
# part after the first ":".
618 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
619 replace(u"'", u'"').replace(u'b"', u'"').
620 replace(u'u"', u'"').split(u":", 1)[1])
623 threads_nr = len(runtime[0][u"clocks"])
624 except (IndexError, KeyError):
# DUT label is positional: DUT1, DUT2, ... in encounter order.
627 dut = u"DUT{nr}".format(
628 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
633 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
637 for idx in range(threads_nr):
# clocks-per-unit: prefer vectors, then calls, then suspends as divisor.
638 if item[u"vectors"][idx] > 0:
639 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
640 elif item[u"calls"][idx] > 0:
641 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
642 elif item[u"suspends"][idx] > 0:
643 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
647 if item[u"calls"][idx] > 0:
648 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
# Only record threads with any activity (calls+vectors+suspends != 0).
652 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
653 int(item[u"suspends"][idx]):
654 oper[u"threads"][idx].append([
657 item[u"vectors"][idx],
658 item[u"suspends"][idx],
663 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
# Parse NDR/PDR lower/upper bounds from the test message. Defaults of
# -1.0 signal "not parsed"; status presumably flips to PASS only on a
# full 4-group match (assignment not visible in this excerpt).
665 def _get_ndrpdr_throughput(self, msg):
666 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
669 :param msg: The test message to be parsed.
671 :returns: Parsed data as a dict and the status (PASS/FAIL).
672 :rtype: tuple(dict, str)
676 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
677 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
680 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
682 if groups is not None:
684 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
685 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
686 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
687 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
689 except (IndexError, ValueError):
692 return throughput, status
# Parse PLRsearch (soak) lower/upper throughput bounds, analogous to
# _get_ndrpdr_throughput but with a flat LOWER/UPPER dict.
694 def _get_plr_throughput(self, msg):
695 """Get PLRsearch lower bound and PLRsearch upper bound from the test
698 :param msg: The test message to be parsed.
700 :returns: Parsed data as a dict and the status (PASS/FAIL).
701 :rtype: tuple(dict, str)
709 groups = re.search(self.REGEX_PLR_RATE, msg)
711 if groups is not None:
713 throughput[u"LOWER"] = float(groups.group(1))
714 throughput[u"UPPER"] = float(groups.group(2))
716 except (IndexError, ValueError):
719 return throughput, status
# Parse latency from the test message, trying three message formats in
# order of decreasing group count: LONG (18 groups), LAT (12), BASE (4).
# Returns (latency_dict, "PASS"/"FAIL").
721 def _get_ndrpdr_latency(self, msg):
722 """Get LATENCY from the test message.
724 :param msg: The test message to be parsed.
726 :returns: Parsed data as a dict and the status (PASS/FAIL).
727 :rtype: tuple(dict, str)
# Pre-populate both directions of each latency category with defaults.
737 u"direction1": copy.copy(latency_default),
738 u"direction2": copy.copy(latency_default)
741 u"direction1": copy.copy(latency_default),
742 u"direction2": copy.copy(latency_default)
745 u"direction1": copy.copy(latency_default),
746 u"direction2": copy.copy(latency_default)
749 u"direction1": copy.copy(latency_default),
750 u"direction2": copy.copy(latency_default)
753 u"direction1": copy.copy(latency_default),
754 u"direction2": copy.copy(latency_default)
757 u"direction1": copy.copy(latency_default),
758 u"direction2": copy.copy(latency_default)
762 # TODO: Rewrite when long and base are not needed
763 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
765 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
767 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
769 return latency, u"FAIL"
# Nested helper: parse one "min/avg/max[/hdrh]" latency string.
771 def process_latency(in_str):
772 """Return object with parsed latency values.
774 TODO: Define class for the return type.
776 :param in_str: Input string, min/avg/max/hdrh format.
778 :returns: Dict with corresponding keys, except hdrh float values.
780 :throws IndexError: If in_str does not have enough substrings.
781 :throws ValueError: If a substring does not convert to float.
783 in_list = in_str.split('/', 3)
786 u"min": float(in_list[0]),
787 u"avg": float(in_list[1]),
788 u"max": float(in_list[2]),
# Optional 4th field: base64-encoded hdrh histogram, kept as string.
792 if len(in_list) == 4:
793 rval[u"hdrh"] = str(in_list[3])
# Groups 1-4 are NDR/PDR direction1/direction2 in all three formats.
798 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
799 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
800 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
801 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
802 if groups.lastindex == 4:
# BASE format: done after the first four groups.
803 return latency, u"PASS"
804 except (IndexError, ValueError):
# LAT format (12 groups): PDR90/PDR50/PDR10/LAT0 pairs follow.
808 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
809 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
810 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
811 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
812 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
813 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
814 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
815 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
816 if groups.lastindex == 12:
817 return latency, u"PASS"
818 except (IndexError, ValueError):
821 # TODO: Remove when not needed
# LONG format (18 groups): adds NDR10/NDR50/NDR90 categories with a
# different group ordering than the LAT format above.
822 latency[u"NDR10"] = {
823 u"direction1": copy.copy(latency_default),
824 u"direction2": copy.copy(latency_default)
826 latency[u"NDR50"] = {
827 u"direction1": copy.copy(latency_default),
828 u"direction2": copy.copy(latency_default)
830 latency[u"NDR90"] = {
831 u"direction1": copy.copy(latency_default),
832 u"direction2": copy.copy(latency_default)
835 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
836 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
837 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
838 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
839 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
840 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
841 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
842 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
843 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
844 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
845 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
846 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
847 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
848 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
849 return latency, u"PASS"
850 except (IndexError, ValueError):
853 return latency, u"FAIL"
# Robot ResultVisitor hook: recurse into child suites, then tests.
855 def visit_suite(self, suite):
856 """Implements traversing through the suite and its direct children.
858 :param suite: Suite to process.
862 if self.start_suite(suite) is not False:
863 suite.suites.visit(self)
864 suite.tests.visit(self)
865 self.end_suite(suite)
# Record suite metadata under _data["suites"], keyed by the lowercase
# long name with spaces replaced by underscores; then visit keywords.
867 def start_suite(self, suite):
868 """Called when suite starts.
870 :param suite: Suite to process.
# Root suite has no parent — AttributeError is expected there.
876 parent_name = suite.parent.name
877 except AttributeError:
# Normalize the suite doc into the reST-flavored format used downstream
# ("|br|" line breaks, "**bold**" markers).
880 doc_str = suite.doc.\
881 replace(u'"', u"'").\
882 replace(u'\n', u' ').\
883 replace(u'\r', u'').\
884 replace(u'*[', u' |br| *[').\
885 replace(u"*", u"**").\
886 replace(u' |br| *[', u'*[', 1)
888 self._data[u"suites"][suite.longname.lower().
890 replace(u" ", u"_")] = {
891 u"name": suite.name.lower(),
893 u"parent": parent_name,
894 u"level": len(suite.longname.split(u"."))
897 suite.keywords.visit(self)
# No-op visitor hook (body not visible in this excerpt beyond docstring).
899 def end_suite(self, suite):
900 """Called when suite ends.
902 :param suite: Suite to process.
# Robot ResultVisitor hook: process the test, then its keywords.
907 def visit_test(self, test):
908 """Implements traversing through the test.
910 :param test: Test to process.
914 if self.start_test(test) is not False:
915 test.keywords.visit(self)
# Core per-test processing: honor the ignore list, apply the long-name
# mapping, normalize TC names/tags, detect the test type from tags
# (NDRPDR / SOAK / TCP / MRR / BMRR / RECONF) and parse type-specific
# results into _data["tests"][test_id].
918 def start_test(self, test):
919 """Called when test starts.
921 :param test: Test to process.
926 longname_orig = test.longname.lower()
928 # Check the ignore list
929 if longname_orig in self._ignore:
932 tags = [str(tag) for tag in test.tags]
935 # Change the TC long name and name if defined in the mapping table
936 longname = self._mapping.get(longname_orig, None)
937 if longname is not None:
938 name = longname.split(u'.')[-1]
940 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
944 longname = longname_orig
945 name = test.name.lower()
947 # Remove TC number from the TC long name (backward compatibility):
948 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
949 # Remove TC number from the TC name (not needed):
950 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
952 test_result[u"parent"] = test.parent.name.lower()
953 test_result[u"tags"] = tags
# Normalize the test doc the same way suite docs are normalized.
954 test_result["doc"] = test.doc.\
955 replace(u'"', u"'").\
956 replace(u'\n', u' ').\
957 replace(u'\r', u'').\
958 replace(u'[', u' |br| [').\
959 replace(u' |br| [', u'[', 1)
960 test_result[u"msg"] = self._get_data_from_perf_test_msg(test.message).\
961 replace(u'\n', u' |br| ').\
962 replace(u'\r', u'').\
964 test_result[u"type"] = u"FUNC"
965 test_result[u"status"] = test.status
967 if u"PERFTEST" in tags:
968 # Replace info about cores (e.g. -1c-) with the info about threads
969 # and cores (e.g. -1t1c-) in the long test case names and in the
970 # test case names if necessary.
971 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
975 for tag in test_result[u"tags"]:
976 groups = re.search(self.REGEX_TC_TAG, tag)
982 self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
983 f"-{tag_tc.lower()}-",
986 test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
987 f"-{tag_tc.lower()}-",
# Zero or multiple thread/core tags → cannot normalize; mark FAIL.
991 test_result[u"status"] = u"FAIL"
992 self._data[u"tests"][self._test_id] = test_result
994 f"The test {self._test_id} has no or more than one "
995 f"multi-threading tags.\n"
996 f"Tags: {test_result[u'tags']}"
# Type-specific result parsing only for PASSed tests.
1000 if test.status == u"PASS":
1001 if u"NDRPDR" in tags:
1002 test_result[u"type"] = u"NDRPDR"
1003 test_result[u"throughput"], test_result[u"status"] = \
1004 self._get_ndrpdr_throughput(test.message)
1005 test_result[u"latency"], test_result[u"status"] = \
1006 self._get_ndrpdr_latency(test.message)
1007 elif u"SOAK" in tags:
1008 test_result[u"type"] = u"SOAK"
1009 test_result[u"throughput"], test_result[u"status"] = \
1010 self._get_plr_throughput(test.message)
1011 elif u"TCP" in tags:
1012 test_result[u"type"] = u"TCP"
1013 groups = re.search(self.REGEX_TCP, test.message)
1014 test_result[u"result"] = int(groups.group(2))
1015 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1017 test_result[u"type"] = u"MRR"
1019 test_result[u"type"] = u"BMRR"
1021 test_result[u"result"] = dict()
1022 groups = re.search(self.REGEX_BMRR, test.message)
1023 if groups is not None:
1024 items_str = groups.group(1)
1025 items_float = [float(item.strip()) for item
1026 in items_str.split(",")]
1027 # Use whole list in CSIT-1180.
# Receive rate is the average of per-trial BMRR samples.
1028 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1029 test_result[u"result"][u"receive-rate"] = stats.avg
# Legacy MRR format: rx count divided by trial duration.
1031 groups = re.search(self.REGEX_MRR, test.message)
1032 test_result[u"result"][u"receive-rate"] = \
1033 float(groups.group(3)) / float(groups.group(1))
1034 elif u"RECONF" in tags:
1035 test_result[u"type"] = u"RECONF"
1036 test_result[u"result"] = None
1038 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1039 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1040 test_result[u"result"] = {
1041 u"loss": int(grps_loss.group(1)),
1042 u"time": float(grps_time.group(1))
1044 except (AttributeError, IndexError, ValueError, TypeError):
1045 test_result[u"status"] = u"FAIL"
1047 test_result[u"status"] = u"FAIL"
1048 self._data[u"tests"][self._test_id] = test_result
1051 self._data[u"tests"][self._test_id] = test_result
# No-op visitor hook (body not visible in this excerpt beyond docstring).
1053 def end_test(self, test):
1054 """Called when test ends.
1056 :param test: Test to process.
# Robot ResultVisitor hook for keywords; child traversal happens via the
# type-specific visit_*_kw methods dispatched in start_keyword.
1061 def visit_keyword(self, keyword):
1062 """Implements traversing through the keyword and its child keywords.
1064 :param keyword: Keyword to process.
1065 :type keyword: Keyword
1068 if self.start_keyword(keyword) is not False:
1069 self.end_keyword(keyword)
# Dispatch by keyword.type: setup / teardown / (default) test keywords.
# AttributeError is tolerated (keyword may lack expected attributes).
1071 def start_keyword(self, keyword):
1072 """Called when keyword starts. Default implementation does nothing.
1074 :param keyword: Keyword to process.
1075 :type keyword: Keyword
1079 if keyword.type == u"setup":
1080 self.visit_setup_kw(keyword)
1081 elif keyword.type == u"teardown":
1082 self.visit_teardown_kw(keyword)
1084 self.visit_test_kw(keyword)
1085 except AttributeError:
# No-op visitor hook (body not visible in this excerpt beyond docstring).
1088 def end_keyword(self, keyword):
1089 """Called when keyword ends. Default implementation does nothing.
1091 :param keyword: Keyword to process.
1092 :type keyword: Keyword
# Recursively traverse a test keyword and its children.
1096 def visit_test_kw(self, test_kw):
1097 """Implements traversing through the test keyword and its child
1100 :param test_kw: Keyword to process.
1101 :type test_kw: Keyword
1104 for keyword in test_kw.keywords:
1105 if self.start_test_kw(keyword) is not False:
1106 self.visit_test_kw(keyword)
1107 self.end_test_kw(keyword)
# Set _msg_type based on the keyword name so start_message knows which
# parser to run, then visit the keyword's messages.
1109 def start_test_kw(self, test_kw):
1110 """Called when test keyword starts. Default implementation does
1113 :param test_kw: Keyword to process.
1114 :type test_kw: Keyword
1117 if test_kw.name.count(u"Show Runtime On All Duts") or \
1118 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1119 self._msg_type = u"test-show-runtime"
# DPDK version is only looked up until a version has been found.
1120 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1121 self._msg_type = u"dpdk-version"
1124 test_kw.messages.visit(self)
# No-op visitor hook (body not visible in this excerpt beyond docstring).
1126 def end_test_kw(self, test_kw):
1127 """Called when keyword ends. Default implementation does nothing.
1129 :param test_kw: Keyword to process.
1130 :type test_kw: Keyword
# Recursively traverse a setup keyword and its children.
# (Docstring says "teardown" — copy/paste artifact in the original.)
1134 def visit_setup_kw(self, setup_kw):
1135 """Implements traversing through the teardown keyword and its child
1138 :param setup_kw: Keyword to process.
1139 :type setup_kw: Keyword
1142 for keyword in setup_kw.keywords:
1143 if self.start_setup_kw(keyword) is not False:
1144 self.visit_setup_kw(keyword)
1145 self.end_setup_kw(keyword)
# Select the parser for setup-phase messages: VPP version, timestamp,
# or testbed IP — each only until the value has been captured once.
1147 def start_setup_kw(self, setup_kw):
1148 """Called when teardown keyword starts. Default implementation does
1151 :param setup_kw: Keyword to process.
1152 :type setup_kw: Keyword
1155 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1156 and not self._version:
1157 self._msg_type = u"vpp-version"
1158 elif setup_kw.name.count(u"Set Global Variable") \
1159 and not self._timestamp:
1160 self._msg_type = u"timestamp"
1161 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1162 self._msg_type = u"testbed"
1165 setup_kw.messages.visit(self)
# No-op visitor hook (body not visible in this excerpt beyond docstring).
1167 def end_setup_kw(self, setup_kw):
1168 """Called when keyword ends. Default implementation does nothing.
1170 :param setup_kw: Keyword to process.
1171 :type setup_kw: Keyword
# Recursively traverse a teardown keyword and its children.
1175 def visit_teardown_kw(self, teardown_kw):
1176 """Implements traversing through the teardown keyword and its child
1179 :param teardown_kw: Keyword to process.
1180 :type teardown_kw: Keyword
1183 for keyword in teardown_kw.keywords:
1184 if self.start_teardown_kw(keyword) is not False:
1185 self.visit_teardown_kw(keyword)
1186 self.end_teardown_kw(keyword)
# Select the VAT or PAPI history parser for teardown-phase messages;
# resets the per-DUT history counter before visiting messages.
1188 def start_teardown_kw(self, teardown_kw):
1189 """Called when teardown keyword starts
1191 :param teardown_kw: Keyword to process.
1192 :type teardown_kw: Keyword
1196 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1197 # TODO: Remove when not needed:
1198 self._conf_history_lookup_nr = 0
1199 self._msg_type = u"teardown-vat-history"
1200 teardown_kw.messages.visit(self)
1201 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1202 self._conf_history_lookup_nr = 0
1203 self._msg_type = u"teardown-papi-history"
1204 teardown_kw.messages.visit(self)
# No-op visitor hook (body not visible in this excerpt beyond docstring).
1206 def end_teardown_kw(self, teardown_kw):
1207 """Called when keyword ends. Default implementation does nothing.
1209 :param teardown_kw: Keyword to process.
1210 :type teardown_kw: Keyword
# Robot ResultVisitor hook for a single message.
1214 def visit_message(self, msg):
1215 """Implements visiting the message.
1217 :param msg: Message to process.
1221 if self.start_message(msg) is not False:
1222 self.end_message(msg)
# Dispatch the message to the parser registered for the current
# _msg_type in the parse_msg table built in __init__.
1224 def start_message(self, msg):
1225 """Called when message starts. Get required information from messages:
1228 :param msg: Message to process.
1234 self.parse_msg[self._msg_type](msg)
# No-op visitor hook (body not visible in this excerpt beyond docstring).
1236 def end_message(self, msg):
1237 """Called when message ends. Default implementation does nothing.
1239 :param msg: Message to process.
1248 The data is extracted from output.xml files generated by Jenkins jobs and
1249 stored in pandas' DataFrames.
1255 (as described in ExecutionChecker documentation)
1257 (as described in ExecutionChecker documentation)
1259 (as described in ExecutionChecker documentation)
# Constructor of the input-data manager class (class header is outside
# this excerpt): stores the specification and creates an empty Series.
1262 def __init__(self, spec):
1265 :param spec: Specification.
1266 :type spec: Specification
1273 self._input_data = pd.Series()
# Property getter fragment below — its `def`/@property lines fall in a
# gap of this excerpt.
1277 """Getter - Input data.
1279 :returns: Input data
1280 :rtype: pandas.Series
1282 return self._input_data
# Accessor: metadata sub-structure for one job/build.
1284 def metadata(self, job, build):
1285 """Getter - metadata
1287 :param job: Job which metadata we want.
1288 :param build: Build which metadata we want.
1292 :rtype: pandas.Series
1295 return self.data[job][build][u"metadata"]
# Accessor: suites sub-structure for one job/build.
# NOTE(review): this one stringifies build (str(build)) while metadata()
# and tests() do not — possibly intentional, cannot confirm from excerpt.
1297 def suites(self, job, build):
1300 :param job: Job which suites we want.
1301 :param build: Build which suites we want.
1305 :rtype: pandas.Series
1308 return self.data[job][str(build)][u"suites"]
# Accessor: tests sub-structure for one job/build.
1310 def tests(self, job, build):
1313 :param job: Job which tests we want.
1314 :param build: Build which tests we want.
1318 :rtype: pandas.Series
1321 return self.data[job][build][u"tests"]
# Parse one downloaded output.xml with Robot's ExecutionResult and run
# ExecutionChecker over it; parse errors are appended to `log` rather
# than raised.
1323 def _parse_tests(self, job, build, log):
1324 """Process data from robot output.xml file and return JSON structured
1327 :param job: The name of job which build output data will be processed.
1328 :param build: The build which output data will be processed.
1329 :param log: List of log messages.
1332 :type log: list of tuples (severity, msg)
1333 :returns: JSON data structure.
1342 with open(build[u"file-name"], u'r') as data_file:
1344 result = ExecutionResult(data_file)
1345 except errors.DataError as err:
1347 (u"ERROR", f"Error occurred while parsing output.xml: "
1351 checker = ExecutionChecker(metadata, self._cfg.mapping,
1353 result.visit(checker)
def _download_and_parse_build(self, job, build, repeat, pid=10000):
    """Download and parse the input data file.
    :param pid: PID of the process executing this method.
    :param job: Name of the Jenkins job which generated the processed input
    :param build: Information about the Jenkins build which generated the
        processed input file.
    :param repeat: Repeat the download specified number of times if not
    # NOTE(review): many lines are elided in this excerpt; the comments
    # below describe only what the visible code shows.
    # Log entries are collected as (severity, message) tuples in ``logs``
    # and replayed through the logging module at the end of the method.
    (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
    # Download (and unzip) the output.xml for this job/build.
    success = download_and_unzip_data_file(self._cfg, job, build, pid,
    f"It is not possible to download the input data file from the "
    f"job {job}, build {build[u'build']}, or it is damaged. "
    f"  Processing data from the build {build[u'build']} ...")
    # Parse the downloaded file into the JSON-like data structure.
    data = self._parse_tests(job, build, logs)
    f"Input data file from the job {job}, build "
    f"{build[u'build']} is damaged. Skipped.")
    state = u"processed"
    # The downloaded file is removed once parsed; a failure to remove it
    # is only logged (OSError), not treated as fatal.
    remove(build[u"file-name"])
    except OSError as err:
        ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
    # If the time-period is defined in the specification file, remove all
    # files which are outside the time period.
    timeperiod = self._cfg.input.get(u"time-period", None)
    if timeperiod and data:
        # timedelta(int(...)) interprets the time-period as days.
        timeperiod = timedelta(int(timeperiod))
        metadata = data.get(u"metadata", None)
        generated = metadata.get(u"generated", None)
        # Timestamp format written by the parser into "generated".
        # NOTE(review): ``now`` is assigned in an elided line -- presumably
        # dt.utcnow(); verify against the full source.
        generated = dt.strptime(generated, u"%Y%m%d %H:%M")
        if (now - generated) > timeperiod:
            # Remove the data and the file:
            f"  The build {job}/{build[u'build']} is "
            f"outdated, will be removed.")
    logs.append((u"INFO", u"  Done."))
    # Replay the collected log entries at their recorded severities.
    for level, line in logs:
        if level == u"INFO":
        elif level == u"ERROR":
        elif level == u"DEBUG":
        elif level == u"CRITICAL":
            logging.critical(line)
        elif level == u"WARNING":
            logging.warning(line)
    # Consumed by download_and_parse_data(): data, a state string
    # (e.g. u"processed"), and the job/build identification.
    return {u"data": data, u"state": state, u"job": job, u"build": build}
def download_and_parse_data(self, repeat=1):
    """Download the input data files, parse input data from input files and
    store in pandas' Series.
    :param repeat: Repeat the download specified number of times if not
    # NOTE(review): some lines are elided in this excerpt (closing brackets
    # of the nested Series literal, error handling); comments describe the
    # visible code only.
    logging.info(u"Downloading and parsing input files ...")
    # Process every configured build of every job, sequentially.
    for job, builds in self._cfg.builds.items():
        for build in builds:
            result = self._download_and_parse_build(job, build, repeat)
            build_nr = result[u"build"][u"build"]
            data = result[u"data"]
            # Repack the parsed dicts into pandas Series keyed by the
            # original dict keys.
            build_data = pd.Series({
                u"metadata": pd.Series(
                    list(data[u"metadata"].values()),
                    index=list(data[u"metadata"].keys())
                u"suites": pd.Series(
                    list(data[u"suites"].values()),
                    index=list(data[u"suites"].keys())
                u"tests": pd.Series(
                    list(data[u"tests"].values()),
                    index=list(data[u"tests"].keys())
            # Lazily create the per-job Series; builds are stored under
            # STRING keys -- the getters must use str(build) accordingly.
            if self._input_data.get(job, None) is None:
                self._input_data[job] = pd.Series()
            self._input_data[job][str(build_nr)] = build_data
            # Record where the file ended up and its processing state
            # back into the specification.
            self._cfg.set_input_file_name(
                job, build_nr, result[u"build"][u"file-name"])
            self._cfg.set_input_state(job, build_nr, result[u"state"])
            # ru_maxrss is reported in kB on Linux; /1000 gives ~MB.
            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
    logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
    logging.info(u"Done.")
1505 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1506 """Return the index of character in the string which is the end of tag.
1508 :param tag_filter: The string where the end of tag is being searched.
1509 :param start: The index where the searching is stated.
1510 :param closer: The character which is the tag closer.
1511 :type tag_filter: str
1514 :returns: The index of the tag closer.
1519 idx_opener = tag_filter.index(closer, start)
1520 return tag_filter.index(closer, idx_opener + 1)
@staticmethod
def _condition(tag_filter):
    """Create a conditional statement from the given tag filter.

    Every quoted tag in the filter gets u" in tags" appended so the whole
    string can be evaluated against a set of tags, e.g.
    "'A' and not 'B'" becomes "'A' in tags and not 'B' in tags".

    :param tag_filter: Filter based on tags from the element specification.
    :type tag_filter: str
    :returns: Conditional statement which can be evaluated.
    :rtype: str
    """
    index = 0
    while True:
        # Find the closing apostrophe of the next tag.
        index = InputData._end_of_tag(tag_filter, index)
        if index is None:
            # No more tags -- the statement is complete.
            return tag_filter
        # Splice the membership test right after the closing apostrophe.
        index += 1
        tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
def filter_data(self, element, params=None, data=None, data_set=u"tests",
                continue_on_error=False):
    """Filter required data from the given jobs and builds.
    The output data structure is:
    - test (or suite) 1 ID:
    - test (or suite) n ID:
    :param element: Element which will use the filtered data.
    :param params: Parameters which will be included in the output. If None,
        all parameters are included.
    :param data: If not None, this data is used instead of data specified
    :param data_set: The set of data to be filtered: tests, suites,
    :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then
    :type element: pandas.Series
    :type continue_on_error: bool
    :returns: Filtered data.
    :rtype pandas.Series
    # NOTE(review): several lines are elided in this excerpt; comments
    # describe only the visible code.
    # Choose the filter condition; "all"/"template" filters are elided
    # branches here, otherwise the tag filter is compiled into an
    # evaluable membership expression by _condition().
    if data_set == "suites":
    elif element[u"filter"] in (u"all", u"template"):
        cond = InputData._condition(element[u"filter"])
        logging.debug(f"  Filter: {cond}")
    logging.error(u"  No filter defined.")
    params = element.get(u"parameters", None)
    # u"type" is always carried through to the output.
    params.append(u"type")
    # Either explicit data or the element's own job/build specification.
    data_to_filter = data if data else element[u"data"]
    for job, builds in data_to_filter.items():
        data[job] = pd.Series()
        for build in builds:
            data[job][str(build)] = pd.Series()
            self.data[job][str(build)][data_set].items())
            if continue_on_error:
            for test_id, test_data in data_dict.items():
                # SECURITY NOTE(review): the tag filter from the element
                # specification is passed to eval() with only ``tags`` in
                # scope -- the spec file is trusted input, but keep it
                # out of reach of untrusted users.
                if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                    data[job][str(build)][test_id] = pd.Series()
                    # With params=None copy every parameter, otherwise
                    # only the requested ones.
                    for param, val in test_data.items():
                        data[job][str(build)][test_id][param] = val
                    for param in params:
                        data[job][str(build)][test_id][param] =\
                        data[job][str(build)][test_id][param] =\
    except (KeyError, IndexError, ValueError) as err:
        f"Missing mandatory parameter in the element specification: "
    except AttributeError as err:
        logging.error(repr(err))
    # A SyntaxError from eval() means a malformed tag filter.
    except SyntaxError as err:
        f"The filter {cond} is not correct. Check if all tags are "
        f"enclosed by apostrophes.\n{repr(err)}"
def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                         continue_on_error=False):
    """Filter required data from the given jobs and builds.
    The output data structure is:
    - test (or suite) 1 ID:
    - test (or suite) n ID:
    :param element: Element which will use the filtered data.
    :param params: Parameters which will be included in the output. If None,
        all parameters are included.
    :param data_set: The set of data to be filtered: tests, suites,
    :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then
    :type element: pandas.Series
    :type continue_on_error: bool
    :returns: Filtered data.
    :rtype pandas.Series
    # NOTE(review): several lines are elided in this excerpt; comments
    # describe only the visible code.
    # Tests are selected by the element's "include" list of regexps.
    include = element.get(u"include", None)
    logging.warning(u"No tests to include, skipping the element.")
    params = element.get(u"parameters", None)
    # u"type" is always carried through to the output.
    params.append(u"type")
    for job, builds in element[u"data"].items():
        data[job] = pd.Series()
        for build in builds:
            data[job][str(build)] = pd.Series()
            for test in include:
                # Each include entry is treated as a case-insensitive
                # regular expression matched against the test ID.
                # NOTE(review): compiling inside the build loop re-does
                # work per build; hoisting would be an easy win.
                reg_ex = re.compile(str(test).lower())
                for test_id in self.data[job][
                        str(build)][data_set].keys():
                    if re.match(reg_ex, str(test_id).lower()):
                        test_data = self.data[job][
                            str(build)][data_set][test_id]
                        data[job][str(build)][test_id] = pd.Series()
                        # With params=None copy every parameter,
                        # otherwise only the requested ones, with
                        # u"No Data" as the missing-value marker.
                        for param, val in test_data.items():
                            data[job][str(build)][test_id]\
                        for param in params:
                            data[job][str(build)][
                            data[job][str(build)][
                                test_id][param] = u"No Data"
            except KeyError as err:
                logging.error(repr(err))
                if continue_on_error:
    except (KeyError, IndexError, ValueError) as err:
        f"Missing mandatory parameter in the element "
        f"specification: {repr(err)}"
    except AttributeError as err:
        logging.error(repr(err))
1737 def merge_data(data):
1738 """Merge data from more jobs and builds to a simple data structure.
1740 The output data structure is:
1742 - test (suite) 1 ID:
1748 - test (suite) n ID:
1751 :param data: Data to merge.
1752 :type data: pandas.Series
1753 :returns: Merged data.
1754 :rtype: pandas.Series
1757 logging.info(u" Merging data ...")
1759 merged_data = pd.Series()
1760 for builds in data.values:
1761 for item in builds.values:
1762 for item_id, item_data in item.items():
1763 merged_data[item_id] = item_data
def print_all_oper_data(self):
    """Print all operational data to console.
    # NOTE(review): lines are elided in this excerpt (the full table
    # header list, the row construction around add_row, and the vector
    # size accumulation); comments describe only the visible code.
    # Table header for the per-thread PrettyTable (partially visible).
    u"Cycles per Packet",
    u"Average Vector Size"
    # Walk every parsed test of every build of every job.
    for job in self._input_data.values:
        for build in job.values:
            for test_id, test_data in build[u"tests"].items():
                # Only tests which captured "show runtime" output.
                if test_data.get(u"show-run", None) is None:
                for dut_name, data in test_data[u"show-run"].items():
                    if data.get(u"threads", None) is None:
                    print(f"Host IP: {data.get(u'host', '')}, "
                          f"Socket: {data.get(u'socket', '')}")
                    # One table per thread; thread 0 is the main thread.
                    for thread_nr, thread in data[u"threads"].items():
                        txt_table = prettytable.PrettyTable(tbl_hdr)
                        txt_table.add_row(row)
                        # Guard against division by zero below.
                        if len(thread) == 0:
                        avg = f", Average Vector Size per Node: " \
                              f"{(avg / len(thread)):.2f}"
                        th_name = u"main" if thread_nr == 0 \
                            else f"worker_{thread_nr}"
                        print(f"{dut_name}, {th_name}{avg}")
                        txt_table.float_format = u".2"
                        txt_table.align = u"r"
                        txt_table.align[u"Name"] = u"l"
                        print(f"{txt_table.get_string()}\n")