1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
48 # Separator used in file names
# Robot Framework result visitor: walks an ExecutionResult tree and collects
# suite, test and metadata information into a nested OrderedDict structure
# (self._data) that downstream code converts to pandas structures.
class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

        "generated": "Timestamp",
        "version": "SUT version",
        "job": "Jenkins job name",
        "build": "Information about the build"
        "Suite long name 1": {
            "doc": "Suite 1 documentation",
            "parent": "Suite 1 parent",
            "level": "Level of the suite in the suite hierarchy"
        "Suite long name N": {
            "doc": "Suite N documentation",
            "parent": "Suite 2 parent",
            "level": "Level of the suite in the suite hierarchy"
        "parent": "Name of the parent of the test",
        "doc": "Test documentation",
        "msg": "Test message",
        "conf-history": "DUT1 and DUT2 VAT History",
        "show-run": "Show Run",
        "tags": ["tag 1", "tag 2", "tag n"],
        "status": "PASS" | "FAIL",
        "parent": "Name of the parent of the test",
        "doc": "Test documentation",
        "msg": "Test message",
        "tags": ["tag 1", "tag 2", "tag n"],
        "status": "PASS" | "FAIL",
        "parent": "Name of the parent of the test",
        "doc": "Test documentation",
        "msg": "Test message",
        "tags": ["tag 1", "tag 2", "tag n"],
        "type": "MRR" | "BMRR",
        "status": "PASS" | "FAIL",
        "receive-rate": float,
        # Average of a list, computed using AvgStdevStats.
        # In CSIT-1180, replace with List[float].
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        "doc": "Suite 1 documentation",
        "parent": "Suite 1 parent",
        "level": "Level of the suite in the suite hierarchy"
        "doc": "Suite N documentation",
        "parent": "Suite 2 parent",
        "level": "Level of the suite in the suite hierarchy"
        "parent": "Name of the parent of the test",
        "doc": "Test documentation"
        "msg": "Test message"
        "tags": ["tag 1", "tag 2", "tag n"],
        "conf-history": "DUT1 and DUT2 VAT History"
        "show-run": "Show Run"
        "status": "PASS" | "FAIL"

    .. note:: ID is the lowercase full path to the test.

    # NOTE(review): in the numeric patterns below '.' is unescaped inside
    # r'\d+.\d+' so it matches any character, not only a decimal point.
    # In practice the surrounding text constrains it, but '\.' would be
    # stricter — confirm before changing, the messages are produced elsewhere.
    # PLRsearch (soak) lower/upper bound rates: two float groups.
    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    # NDR/PDR throughput in pps: four float groups (NDR lo/up, PDR lo/up).
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    # Same bounds expressed in Gbps (value after the comma in the message).
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    # Full NDRPDR summary: 4 rate groups + 6 latency-string groups (10 total).
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    REGEX_CPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
    REGEX_PPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
    # MRR trial results: single group with a comma-separated list in brackets.
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
    # VSAP (nginx/ab) summary: six numeric groups.
    REGEX_VSAP_MSG_INFO = re.compile(
        r'Transfer Rate: (\d*.\d*).*\n'
        r'Latency: (\d*.\d*).*\n'
        r'Completed requests: (\d*).*\n'
        r'Failed requests: (\d*).*\n'
        r'Total data transferred: (\d*).*\n'
        r'Connection [cr]ps rate:\s*(\d*.\d*)'
    # Needed for CPS and PPS tests
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    REGEX_VERSION_VPP = re.compile(
        r"(VPP Version:\s*|VPP version:\s*)(.*)"
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    REGEX_BMRR = re.compile(
        r'.*trial results.*: \[(.*)\]'
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    # Thread/core tag, e.g. "2T1C".
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
    # Core-only fragment in test names, e.g. "-1c-".
    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
    # Leading test-case number prefix, e.g. "tc01-".
    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
    # "(host - socket)" suffix in PAPI CLI messages.
    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
    # hostname/hook pair in telemetry show-run metrics.
    REGEX_SH_RUN_HOST = re.compile(
        r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
    # One checker instance parses one output.xml build; all parser state
    # (counters, current test id, accumulated data) lives on the instance.
    def __init__(self, metadata, mapping, ignore, process_oper):
        :param metadata: Key-value pairs to be included in "metadata" part of
        :param mapping: Mapping of the old names of test cases to the new
        :param ignore: List of TCs to be ignored.
        :param process_oper: If True, operational data (show run, telemetry) is
        :type process_oper: bool
        # Type of message to parse out from the test messages
        self._msg_type = None
        self._timestamp = None
        # Testbed. The testbed is identified by TG node IP address.
        # Mapping of TCs long names
        self._mapping = mapping
        self._ignore = ignore
        self._process_oper = process_oper
        # Number of PAPI History messages found:
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0
        # Per-test counters; reset in start_test() before each test is visited.
        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0
        # Test ID of currently processed test- the lowercase full path to the
        # The main data structure
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val
        # Dictionary defining the methods used to parse different types of
        # message (dispatched from start_message() via self._msg_type).
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed,
            u"test-telemetry": self._get_telemetry
373 """Getter - Data parsed from the XML file.
375 :returns: Data parsed from the XML file.
    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :returns: Processed message or original message if a problem occurs.
        # Exactly one bracketed group (the trial-result list) is expected.
        groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
        if not groups or groups.lastindex != 1:
            return u"Test Failed."
            data = groups.group(1).split(u", ")
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."
            # Rates are reported in pps; divide by 1e6 to display Mpps
            # with two decimals.
            out_str += f"{(float(item) / 1e6):.2f}, "
        # Drop the trailing ", " and close the bracket.
        return out_str[:-2] + u"]"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."
    def _get_data_from_cps_test_msg(self, msg):
        """Get info from message of NDRPDR CPS tests.

        :param msg: Message to be processed.
        :returns: Processed message or "Test Failed." if a problem occurs.
        # Two groups expected: NDR_LOWER and PDR_LOWER connection rates.
        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 2:
            return u"Test Failed."
            # Display as Mcps with two decimals, one bound per line.
            f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
            f"2. {(float(groups.group(2)) / 1e6):5.2f}"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."
    def _get_data_from_pps_test_msg(self, msg):
        """Get info from message of NDRPDR PPS tests.

        :param msg: Message to be processed.
        :returns: Processed message or "Test Failed." if a problem occurs.
        # Four groups expected: NDR pps+Gbps and PDR pps+Gbps.
        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 4:
            return u"Test Failed."
            # Per bound: rate in Mpps (group/1e6) followed by Gbps as-is.
            f"1. {(float(groups.group(1)) / 1e6):5.2f} "
            f"{float(groups.group(2)):5.2f}\n"
            f"2. {(float(groups.group(3)) / 1e6):5.2f} "
            f"{float(groups.group(4)):5.2f}"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."
    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :returns: Processed message or "Test Failed." if a problem occurs.
        # Ten groups expected: NDR/PDR rates+bandwidth and three latency pairs.
        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."
            u"ndr_low": float(groups.group(1)),
            u"ndr_low_b": float(groups.group(2)),
            u"pdr_low": float(groups.group(3)),
            u"pdr_low_b": float(groups.group(4)),
            u"pdr_lat_90_1": groups.group(5),
            u"pdr_lat_90_2": groups.group(6),
            u"pdr_lat_50_1": groups.group(7),
            u"pdr_lat_50_2": groups.group(8),
            u"pdr_lat_10_1": groups.group(9),
            u"pdr_lat_10_2": groups.group(10),
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        # Nested helper: turns two per-direction latency strings into one
        # printable record, preferring hdrh percentiles over min/avg/max.
        def _process_lat(in_str_1, in_str_2):
            """Extract P50, P90 and P99 latencies or min, avg, max values from
            :param in_str_1: Latency string for one direction produced by robot
            :param in_str_2: Latency string for second direction produced by
            :returns: Processed latency string or None if a problem occurs.
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)
            # NOTE(review): 'and' means processing continues when only ONE of
            # the two strings has 4 fields — 'or' would reject either being
            # short; confirm this asymmetry is intentional.
            if len(in_list_1) != 4 and len(in_list_2) != 4:
            # NOTE(review): base64 padding via "len % 4" adds len%4 '=' chars;
            # canonical padding is (4 - len % 4) % 4 — presumably works for
            # the hdrh strings actually produced; confirm.
            in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
                hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
            in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
                hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
            # Both directions decoded: report P50/P90/P99 per direction.
            if hdr_lat_1 and hdr_lat_2:
                    hdr_lat_1.get_value_at_percentile(50.0),
                    hdr_lat_1.get_value_at_percentile(90.0),
                    hdr_lat_1.get_value_at_percentile(99.0),
                    hdr_lat_2.get_value_at_percentile(50.0),
                    hdr_lat_2.get_value_at_percentile(90.0),
                    hdr_lat_2.get_value_at_percentile(99.0)
                # Fallback: raw min/avg/max integers from both directions.
                int(in_list_1[0]), int(in_list_1[1]), int(in_list_1[2]),
                int(in_list_2[0]), int(in_list_2[1]), int(in_list_2[2])
                # Sentinel values (-1, uint32 max, 0) mean "no valid latency".
                if item in (-1, 4294967295, 0):

            # Throughput summary: Mpps and Gbps for NDR and PDR lower bounds.
            f"1. {(data[u'ndr_low'] / 1e6):5.2f} "
            f"{data[u'ndr_low_b']:5.2f}"
            f"\n2. {(data[u'pdr_low'] / 1e6):5.2f} "
            f"{data[u'pdr_low_b']:5.2f}"
            _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
            _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
            _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
        # Column width: widest latency value, but at least 4 characters.
        max_len = len(str(max((max(item) for item in latency))))
        max_len = 4 if max_len < 4 else max_len
        for idx, lat in enumerate(latency):
                # NOTE(review): the width spec '68,162d' below looks corrupted
                # (it is not a valid Python format spec); presumably it was
                # '{max_len}d' — max_len is computed above and otherwise
                # unused. Confirm against the original file.
                f"{lat[0]:68,162d} "
                f"{lat[1]:68,162d} "
                f"{lat[2]:68,162d} "
                f"{lat[3]:68,162d} "
                f"{lat[4]:68,162d} "
                f"{lat[5]:68,162d} "
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."
    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            # NOTE(review): dots in the IP pattern are unescaped ('.' matches
            # any char); adequate for these messages but '\.' would be exact.
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
            self._data[u"metadata"][u"testbed"] = self._testbed
            # Stop dispatching further messages to this parser.
            self._msg_type = None
    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        # Both capitalisations occur in the logs; REGEX_VERSION_VPP covers both.
        if msg.message.count(u"VPP version:") or \
                msg.message.count(u"VPP Version:"):
                re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
            self._data[u"metadata"][u"version"] = self._version
            # Version found; stop parsing further messages for it.
            self._msg_type = None
            logging.info(self._version)
    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        if msg.message.count(u"DPDK Version:"):
                # Group 2 is the version string after the "DPDK Version:" label.
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            # Version handled; stop dispatching to this parser.
            self._msg_type = None
    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        if msg.message.count(u"PAPI command history:"):
            # One message arrives per DUT; the counter numbers them (DUT1,
            # DUT2, ...) and triggers initialisation on the first one.
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
                self._msg_type = None
            # Strip the leading "<ip> PAPI command history:" header; double
            # quotes are normalised to single quotes.
                r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
            ).replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        if not msg.message.count(u"stats runtime"):
        # Only the first "Show Runtime" keyword occurrence per test is kept.
        if self._sh_run_counter > 1:
        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()
        # Extract "(host - socket)" pair from the PAPI CLI message header.
        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
            host = groups.group(1)
        except (AttributeError, IndexError):
            sock = groups.group(2)
        except (AttributeError, IndexError):
        # DUTs are keyed dut1, dut2, ... in arrival order.
        dut = u"dut{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
        self._data[u'tests'][self._test_id][u'show-run'][dut] = \
            # Normalise the message into JSON-ish text: strip spaces/newlines,
            # turn single quotes and b"/u" string prefixes into plain quotes.
            u"runtime": str(msg.message).replace(u' ', u'').
                        replace(u'\n', u'').replace(u"'", u'"').
                        replace(u'b"', u'"').replace(u'u"', u'"').
    def _get_telemetry(self, msg):
        """Called when extraction of VPP telemetry data is required.

        :param msg: Message to process.
        # Only the first telemetry keyword occurrence per test is processed.
        if self._telemetry_kw_counter > 1:
        # Only Prometheus-style exports containing vpp_runtime_calls metrics.
        if not msg.message.count(u"# TYPE vpp_runtime_calls"):
        if u"telemetry-show-run" not in \
                self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()

        self._telemetry_msg_counter += 1
        groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
            host = groups.group(1)
        except (AttributeError, IndexError):
            sock = groups.group(2)
        except (AttributeError, IndexError):
            u"source_type": u"node",
            u"msg_type": u"metric",
            u"log_level": u"INFO",
            u"timestamp": msg.timestamp,
            u"msg": u"show_runtime",
        # Parse each Prometheus line: "<name>{<labels>} <value> <timestamp>".
        for line in msg.message.splitlines():
            if not line.startswith(u"vpp_runtime_"):
                params, value, timestamp = line.rsplit(u" ", maxsplit=2)
                cut = params.index(u"{")
                name = params[:cut].split(u"_", maxsplit=2)[-1]
                # The label block "{k="v",...}" is rewritten into a dict(...)
                # call — presumably later eval'd/parsed; confirm upstream.
                u"dict" + params[cut:].replace('{', '(').replace('}', ')')
                # Prometheus "name" label holds the graph node name.
                labels[u"graph_node"] = labels.pop(u"name")
                runtime[u"data"].append(
                    u"timestamp": timestamp,
            except (TypeError, ValueError, IndexError):
        # One entry per DUT, keyed by message arrival order.
        self._data[u'tests'][self._test_id][u'telemetry-show-run']\
            [f"dut{self._telemetry_msg_counter}"] = copy.copy(
    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test

        :param msg: The test message to be parsed.
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        # -1.0 marks "not parsed"; overwritten only on a successful match.
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}

        groups = re.search(self.REGEX_NDRPDR_RATE, msg)
        if groups is not None:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
            except (IndexError, ValueError):

        return throughput, status
    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the

        :param msg: The test message to be parsed.
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        # Same shape as _get_ndrpdr_throughput(), but values are Gbps.
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}

        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
        if groups is not None:
                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
            except (IndexError, ValueError):
    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test

        :param msg: The test message to be parsed.
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        # Two float groups: PLRsearch lower and upper bound rates.
        groups = re.search(self.REGEX_PLR_RATE, msg)
        if groups is not None:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
            except (IndexError, ValueError):

        return throughput, status
    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        # Six latency records (NDR, PDR, PDR90/50/10, LAT0), each with two
        # directions initialised from latency_default.
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)

        # Prefer the full 12-group pattern; fall back to the 4-group base
        # pattern used by CPS/PPS tests.
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
            groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
            return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :returns: Dict with corresponding keys, except hdrh float values.
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
            in_list = in_str.split('/', 3)
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
            # The optional 4th field is the base64-encoded hdrh histogram.
            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            # Groups 1-4: NDR/PDR latency (both patterns provide these).
            latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
            latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
            latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
            latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
            if groups.lastindex == 4:
                return latency, u"PASS"
        except (IndexError, ValueError):

            # Groups 5-12: percentile-load latencies, full pattern only.
            latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
            latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
            latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
            latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
            latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
            latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
            latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
            latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
            if groups.lastindex == 12:
                return latency, u"PASS"
        except (IndexError, ValueError):

        return latency, u"FAIL"
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        # Normalise Python-repr quoting to JSON quoting before loads().
        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
        except JSONDecodeError:
        elif u"VPPECHO" in tags:
            # VPPECHO emits two concatenated JSON objects: client then server.
            msg_lst = msg.replace(u"}{", u"} {").split(u" ")
                client=loads(msg_lst[0]),
                server=loads(msg_lst[1])
            except (JSONDecodeError, IndexError):

        return result, status
    def _get_vsap_data(self, msg, tags):
        """Get data from the vsap test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
        if groups is not None:
                # Transfer rate reported in kB/s (?) — multiplied by 1e3 here;
                # confirm units against the test that emits the message.
                result[u"transfer-rate"] = float(groups.group(1)) * 1e3
                result[u"latency"] = float(groups.group(2))
                result[u"completed-requests"] = int(groups.group(3))
                result[u"failed-requests"] = int(groups.group(4))
                result[u"bytes-transferred"] = int(groups.group(5))
                # Group 6 is cps or rps depending on the test variant tag.
                if u"TCP_CPS"in tags:
                    result[u"cps"] = float(groups.group(6))
                elif u"TCP_RPS" in tags:
                    result[u"rps"] = float(groups.group(6))
                return result, status
            except (IndexError, ValueError):

        return result, status
    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        # start_suite() may veto traversal by returning False.
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
            parent_name = suite.parent.name
        except AttributeError:
        # Suite key: lowercase long name, '"'->'\'' and spaces -> underscores.
        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
            u"name": suite.name.lower(),
            u"parent": parent_name,
            # Hierarchy depth = number of dot-separated long-name components.
            u"level": len(suite.longname.split(u"."))

        suite.setup.visit(self)
        suite.body.visit(self)
        suite.teardown.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        if self.start_test(test) is not False:
            test.setup.visit(self)
            test.body.visit(self)
            test.teardown.visit(self)
    # Main per-test dispatcher: builds the test_result record and fills it
    # according to the test type derived from tags.
    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        # Reset per-test keyword/message counters.
        self._sh_run_counter = 0
        self._telemetry_kw_counter = 0
        self._telemetry_msg_counter = 0

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result[u"parent"] = test.parent.name.lower()
        test_result[u"tags"] = tags
        test_result["doc"] = test.doc
        test_result[u"type"] = u""
        test_result[u"status"] = test.status
        test_result[u"starttime"] = test.starttime
        test_result[u"endtime"] = test.endtime

        # "msg" is a human-readable summary derived from the raw test message,
        # formatted differently per test type (PPS/CPS/NDRPDR/MRR).
        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
                    test_result[u"msg"] = self._get_data_from_pps_test_msg(
                elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"msg"] = self._get_data_from_cps_test_msg(
                    test_result[u"msg"] = self._get_data_from_perf_test_msg(
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
                test_result[u"msg"] = test.message
            test_result[u"msg"] = test.message

        if u"PERFTEST" in tags and u"TREX" not in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            for tag in test_result[u"tags"]:
                groups = re.search(self.REGEX_TC_TAG, tag)
                    self._test_id = re.sub(
                        self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                        self._test_id, count=1
                    test_result[u"name"] = re.sub(
                        self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                        test_result["name"], count=1
                # A perf test must carry exactly one NtMc tag; otherwise it is
                # recorded as FAIL with an explanatory message.
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                    f"The test {self._test_id} has no or more than one "
                    f"multi-threading tags.\n"
                    f"Tags: {test_result[u'tags']}"

        # Type-specific result extraction; each helper also refines "status".
        if u"DEVICETEST" in tags:
            test_result[u"type"] = u"DEVICETEST"
        elif u"NDRPDR" in tags:
            if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                test_result[u"type"] = u"CPS"
                test_result[u"type"] = u"NDRPDR"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"gbps"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput_gbps(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
        elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"type"] = u"MRR"
                test_result[u"type"] = u"BMRR"
            if test.status == u"PASS":
                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                        float(item.strip().replace(u"'", u""))
                        for item in items_str.split(",")
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"samples"] = items_float
                    test_result[u"result"][u"receive-rate"] = stats.avg
                    test_result[u"result"][u"receive-stdev"] = stats.stdev
                    # Legacy MRR format: rx count divided by trial duration.
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
        elif u"SOAK" in tags:
            test_result[u"type"] = u"SOAK"
            if test.status == u"PASS":
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
        elif u"LDP_NGINX" in tags:
            test_result[u"type"] = u"LDP_NGINX"
            test_result[u"result"], test_result[u"status"] = \
                self._get_vsap_data(test.message, tags)
        elif u"HOSTSTACK" in tags:
            test_result[u"type"] = u"HOSTSTACK"
            if test.status == u"PASS":
                test_result[u"result"], test_result[u"status"] = \
                    self._get_hoststack_data(test.message, tags)
        elif u"RECONF" in tags:
            test_result[u"type"] = u"RECONF"
            if test.status == u"PASS":
                test_result[u"result"] = None
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
            # Unrecognised test type: mark as FAIL.
            test_result[u"status"] = u"FAIL"

        self._data[u"tests"][self._test_id] = test_result
    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        # Dispatch on keyword type; both old lowercase and new uppercase
        # Robot Framework type constants are accepted.
        if keyword.type in ("setup", "SETUP"):
            self.visit_setup_kw(keyword)
        elif keyword.type in ("teardown", "TEARDOWN"):
            self.visit_teardown_kw(keyword)
            self.visit_test_kw(keyword)
        except AttributeError:

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        # Recurse depth-first through the keyword body.
        for keyword in test_kw.body:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        # Operational data (show run / telemetry) is skipped entirely when
        # the checker was constructed with process_oper=False.
        if not self._process_oper:

        if test_kw.name.count(u"Run Telemetry On All Duts"):
            self._msg_type = u"test-telemetry"
            self._telemetry_kw_counter += 1
        elif test_kw.name.count(u"Show Runtime On All Duts"):
            self._msg_type = u"test-show-runtime"
            self._sh_run_counter += 1

        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
1320 def visit_setup_kw(self, setup_kw):
1321 """Implements traversing through the teardown keyword and its child
1324 :param setup_kw: Keyword to process.
1325 :type setup_kw: Keyword
1328 for keyword in setup_kw.setup:
1329 if self.start_setup_kw(keyword) is not False:
1330 self.visit_setup_kw(keyword)
1331 self.end_setup_kw(keyword)
1332 for keyword in setup_kw.body:
1333 if self.start_setup_kw(keyword) is not False:
1334 self.visit_setup_kw(keyword)
1335 self.end_setup_kw(keyword)
1337 def start_setup_kw(self, setup_kw):
1338 """Called when teardown keyword starts. Default implementation does
1341 :param setup_kw: Keyword to process.
1342 :type setup_kw: Keyword
1345 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1346 and not self._version:
1347 self._msg_type = u"vpp-version"
1348 elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1350 self._msg_type = u"dpdk-version"
1351 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1352 self._msg_type = u"testbed"
1355 setup_kw.messages.visit(self)
1357 def end_setup_kw(self, setup_kw):
1358 """Called when keyword ends. Default implementation does nothing.
1360 :param setup_kw: Keyword to process.
1361 :type setup_kw: Keyword
1365 def visit_teardown_kw(self, teardown_kw):
1366 """Implements traversing through the teardown keyword and its child
1369 :param teardown_kw: Keyword to process.
1370 :type teardown_kw: Keyword
1373 for keyword in teardown_kw.body:
1374 if self.start_teardown_kw(keyword) is not False:
1375 self.visit_teardown_kw(keyword)
1376 self.end_teardown_kw(keyword)
1378 def start_teardown_kw(self, teardown_kw):
1379 """Called when teardown keyword starts
1381 :param teardown_kw: Keyword to process.
1382 :type teardown_kw: Keyword
1385 if teardown_kw.name.count(u"Show Papi History On All Duts"):
1386 self._conf_history_lookup_nr = 0
1387 self._msg_type = u"teardown-papi-history"
1388 teardown_kw.messages.visit(self)
    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        """
1398 def visit_message(self, msg):
1399 """Implements visiting the message.
1401 :param msg: Message to process.
1405 if self.start_message(msg) is not False:
1406 self.end_message(msg)
1408 def start_message(self, msg):
1409 """Called when message starts. Get required information from messages:
1412 :param msg: Message to process.
1417 self.parse_msg[self._msg_type](msg)
    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        """
1431 The data is extracted from output.xml files generated by Jenkins jobs and
1432 stored in pandas' DataFrames.
1438 (as described in ExecutionChecker documentation)
1440 (as described in ExecutionChecker documentation)
1442 (as described in ExecutionChecker documentation)
1445 def __init__(self, spec, for_output):
1448 :param spec: Specification.
1449 :param for_output: Output to be generated from downloaded data.
1450 :type spec: Specification
1451 :type for_output: str
1457 self._for_output = for_output
1460 self._input_data = pd.Series(dtype="float64")
        """Getter - parsed input data.

        :returns: Input data.
        :rtype: pandas.Series
        """
        return self._input_data
1471 def metadata(self, job, build):
1472 """Getter - metadata
1474 :param job: Job which metadata we want.
1475 :param build: Build which metadata we want.
1479 :rtype: pandas.Series
1481 return self.data[job][build][u"metadata"]
1483 def suites(self, job, build):
1486 :param job: Job which suites we want.
1487 :param build: Build which suites we want.
1491 :rtype: pandas.Series
1493 return self.data[job][str(build)][u"suites"]
1495 def tests(self, job, build):
1498 :param job: Job which tests we want.
1499 :param build: Build which tests we want.
1503 :rtype: pandas.Series
1505 return self.data[job][build][u"tests"]
    def _parse_tests(self, job, build):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of job which build output data will be processed.
        :param build: The build which output data will be processed.
        :type job: str
        :type build: dict
        :returns: JSON data structure.
        :rtype: dict
        """
        # NOTE(review): the metadata dict initialisation, the try/except
        # bodies and the return statements are elided in this excerpt --
        # confirm against the full file.
        with open(build[u"file-name"], u'r') as data_file:
                # Robot Framework results parser; DataError is raised for a
                # damaged output.xml.
                result = ExecutionResult(data_file)
        except errors.DataError as err:
                f"Error occurred while parsing output.xml: {repr(err)}"
        # Operational ("show runtime") data is processed for coverage jobs
        # only.
        process_oper = False
        if u"-vpp-perf-report-coverage-" in job:
        # elif u"-vpp-perf-report-iterative-" in job:
        #     # Exceptions for TBs where we do not have coverage data:
        #     for item in (u"-2n-icx", ):
        #         process_oper = True
        checker = ExecutionChecker(
            metadata, self._cfg.mapping, self._cfg.ignore, process_oper
        result.visit(checker)
        # Attach overall statistics of the parsed run to its metadata.
        checker.data[u"metadata"][u"tests_total"] = \
            result.statistics.total.total
        checker.data[u"metadata"][u"tests_passed"] = \
            result.statistics.total.passed
        checker.data[u"metadata"][u"tests_failed"] = \
            result.statistics.total.failed
        checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
        # Keep only "YYYYMMDD HH:MM" from the suite end time.
        checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        """
        # NOTE(review): surrounding control flow (try/except blocks, the
        # failed-state branches and the returned result dict) is elided in
        # this excerpt; comments cover only the visible statements.
        logging.info(f"Processing the job/build: {job}: {build[u'build']}")
        # Download (and unzip) the output file; retries are presumably
        # driven by 'repeat' around this call -- confirm in the full file.
        success = download_and_unzip_data_file(self._cfg, job, build, pid)
            f"It is not possible to download the input data file from the "
            f"job {job}, build {build[u'build']}, or it is damaged. "
        logging.info(f"  Processing data from build {build[u'build']}")
        # Parse the downloaded output.xml into a JSON-like dict.
        data = self._parse_tests(job, build)
            f"Input data file from the job {job}, build "
            f"{build[u'build']} is damaged. Skipped."
        state = u"processed"
        # Best-effort removal of the local file once it is parsed.
            remove(build[u"file-name"])
        except OSError as err:
            f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.environment.get(u"time-period", None)
        if timeperiod and data:
            # time-period is given in days.
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get(u"metadata", None)
            generated = metadata.get(u"generated", None)
            generated = dt.strptime(generated, u"%Y%m%d %H:%M")
            # 'now' is assigned in an elided line above -- presumably the
            # current UTC time; verify against the full file.
            if (now - generated) > timeperiod:
                # Remove the data and the file:
                f"  The build {job}/{build[u'build']} is "
                f"outdated, will be removed."
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """
        # NOTE(review): error-handling branches, the pd.Series closing
        # brackets and the "mem_alloc =" assignment are elided in this
        # excerpt.
        logging.info(u"Downloading and parsing input files ...")

        for job, builds in self._cfg.input.items():
            for build in builds:
                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result[u"build"][u"build"]
                data = result[u"data"]
                # Re-shape the parsed dicts into nested pandas.Series.
                build_data = pd.Series({
                    u"metadata": pd.Series(
                        list(data[u"metadata"].values()),
                        index=list(data[u"metadata"].keys())
                    u"suites": pd.Series(
                        list(data[u"suites"].values()),
                        index=list(data[u"suites"].keys())
                    u"tests": pd.Series(
                        list(data[u"tests"].values()),
                        index=list(data[u"tests"].keys())
                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series(dtype="float64")
                # Builds are stored under string keys.
                self._input_data[job][str(build_nr)] = build_data
                self._cfg.set_input_file_name(
                    job, build_nr, result[u"build"][u"file-name"]
                self._cfg.set_input_state(job, build_nr, result[u"state"])
                # ru_maxrss is reported in kB on Linux; convert to MB.
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
                logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

        logging.info(u"Done.")

        # Summarise per-source download statistics.
        msg = f"Successful downloads from the sources:\n"
        for source in self._cfg.environment[u"data-sources"]:
            if source[u"successful-downloads"]:
                    f"{source[u'url']}/{source[u'path']}/"
                    f"{source[u'file-name']}: "
                    f"{source[u'successful-downloads']}\n"
    def process_local_file(self, local_file, job=u"local", build_nr=1,
        """Process local XML file given as a command-line parameter.

        :param local_file: The file to process.
        :param job: Job name.
        :param build_nr: Build number.
        :param replace: If True, the information about jobs and builds is
            replaced by the new one, otherwise the new jobs and builds are
            added to the existing ones.
        :type local_file: str
        :type job: str
        :type build_nr: int
        :type replace: bool
        :raises: PresentationError if an error occurs.
        """
        # NOTE(review): the signature continuation (presumably
        # "replace=True):"), the try/except wrappers and the "build = {...}"
        # initialisation are elided in this excerpt.
        if not isfile(local_file):
            raise PresentationError(f"The file {local_file} does not exist.")

        # Derive the build number from the file name, e.g. "7.xml" -> 7;
        # fall back to the default on failure.
            build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
        except (IndexError, ValueError):
            u"status": u"failed",
            u"file-name": local_file
            # When replacing, drop previously configured jobs/builds
            # (guard is elided).
            self._cfg.input = dict()
        self._cfg.add_build(job, build)

        logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
        data = self._parse_tests(job, build)
            raise PresentationError(
                f"Error occurred while parsing the file {local_file}"
        # Re-shape the parsed dicts into nested pandas.Series.
        build_data = pd.Series({
            u"metadata": pd.Series(
                list(data[u"metadata"].values()),
                index=list(data[u"metadata"].keys())
            u"suites": pd.Series(
                list(data[u"suites"].values()),
                index=list(data[u"suites"].keys())
            u"tests": pd.Series(
                list(data[u"tests"].values()),
                index=list(data[u"tests"].keys())
        if self._input_data.get(job, None) is None:
            self._input_data[job] = pd.Series(dtype="float64")
        # Builds are stored under string keys.
        self._input_data[job][str(build_nr)] = build_data

        self._cfg.set_input_state(job, build_nr, u"processed")
    def process_local_directory(self, local_dir, replace=True):
        """Process local directory with XML file(s). The directory is processed
        as a 'job' and the XML files in it as builds.
        If the given directory contains only sub-directories, these
        sub-directories are processed as jobs and corresponding XML files as
        builds of those jobs.

        :param local_dir: Local directory to process.
        :param replace: If True, the information about jobs and builds is
            replaced by the new one, otherwise the new jobs and builds are
            added to the existing ones.
        :type local_dir: str
        :type replace: bool
        :raises: PresentationError if the directory does not exist, is empty,
            or mixes files with sub-directories.
        """
        # NOTE(review): dict-literal delimiters, the "builds = [" list
        # comprehension opener, the final "else:" keyword and the replace
        # guard are elided in this excerpt.
        if not isdir(local_dir):
            raise PresentationError(
                f"The directory {local_dir} does not exist."

        # Check if the given directory includes only files, or only directories
        _, dirnames, filenames = next(walk(local_dir))

        if filenames and not dirnames:
            # Flat directory: the directory itself is the job.
            # key: dir (job) name, value: list of file names (builds)
                local_dir: [join(local_dir, name) for name in filenames]
        elif dirnames and not filenames:
            # One sub-directory per job.
            # key: dir (job) name, value: list of file names (builds)
            local_builds = dict()
            for dirname in dirnames:
                # Only regular files inside each job directory are builds.
                    join(local_dir, dirname, name)
                    for name in listdir(join(local_dir, dirname))
                    if isfile(join(local_dir, dirname, name))
                # Sorted file order defines the build numbering.
                local_builds[dirname] = sorted(builds)
        elif not filenames and not dirnames:
            raise PresentationError(f"The directory {local_dir} is empty.")
            raise PresentationError(
                f"The directory {local_dir} can include only files or only "
                f"directories, not both.\nThe directory {local_dir} includes "
                f"file(s):\n{filenames}\nand directories:\n{dirnames}"

            # When replacing, drop previously configured jobs/builds
            # (guard is elided).
            self._cfg.input = dict()

        for job, files in local_builds.items():
            for idx, local_file in enumerate(files):
                # Builds are numbered from 1 in sorted file order.
                self.process_local_file(local_file, job, idx + 1, replace=False)
1820 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1821 """Return the index of character in the string which is the end of tag.
1823 :param tag_filter: The string where the end of tag is being searched.
1824 :param start: The index where the searching is stated.
1825 :param closer: The character which is the tag closer.
1826 :type tag_filter: str
1829 :returns: The index of the tag closer.
1833 idx_opener = tag_filter.index(closer, start)
1834 return tag_filter.index(closer, idx_opener + 1)
1839 def _condition(tag_filter):
1840 """Create a conditional statement from the given tag filter.
1842 :param tag_filter: Filter based on tags from the element specification.
1843 :type tag_filter: str
1844 :returns: Conditional statement which can be evaluated.
1849 index = InputData._end_of_tag(tag_filter, index)
1853 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
    def filter_data(self, element, params=None, data=None, data_set=u"tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output is a nested pandas.Series:
        data[job][build][test (or suite) ID][parameter] = value

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """
        # NOTE(review): the try/except wrappers, the "data_dict = dict(..."
        # opener, continue/return statements and several branch bodies are
        # elided in this excerpt; comments cover only visible code.
        if data_set == "suites":
        elif element[u"filter"] in (u"all", u"template"):
            # Translate the tag filter into an eval-able boolean expression.
            cond = InputData._condition(element[u"filter"])
            logging.debug(f"    Filter: {cond}")
            logging.error(u"    No filter defined.")
        params = element.get(u"parameters", None)
            # u"type" and u"status" are always required by data consumers.
            params.extend((u"type", u"status"))
        data_to_filter = data if data else element[u"data"]
        data = pd.Series(dtype="float64")
        for job, builds in data_to_filter.items():
            data[job] = pd.Series(dtype="float64")
            for build in builds:
                data[job][str(build)] = pd.Series(dtype="float64")
                # data_dict is built from the parsed input data (the
                # assignment opener is elided):
                    self.data[job][str(build)][data_set].items())
                if continue_on_error:
                for test_id, test_data in data_dict.items():
                    # HACK: eval() of the spec-provided tag filter. The
                    # specification is operator-controlled input, but keep
                    # untrusted data away from this expression.
                    if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                        data[job][str(build)][test_id] = \
                            pd.Series(dtype="float64")
                        # params is None -> copy all parameters.
                        for param, val in test_data.items():
                            data[job][str(build)][test_id][param] = val
                        for param in params:
                                data[job][str(build)][test_id][param] =\
                                # Missing parameter -> placeholder (elided).
                                data[job][str(build)][test_id][param] =\
        except (KeyError, IndexError, ValueError) as err:
            f"Missing mandatory parameter in the element specification: "
        except AttributeError as err:
            logging.error(repr(err))
        except SyntaxError as err:
            f"The filter {cond} is not correct. Check if all tags are "
            f"enclosed by apostrophes.\n{repr(err)}"
    def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds by test name.

        The output is a nested pandas.Series:
        data[job][build][test (or suite) ID][parameter] = value

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """
        # NOTE(review): the try/except wrappers, the "tests" list
        # initialisation, the outer per-test loop header and several
        # continue/return statements are elided in this excerpt.
        include = element.get(u"include", None)
            logging.warning(u"No tests to include, skipping the element.")
        params = element.get(u"parameters", None)
        if params and u"type" not in params:
            params.append(u"type")
        # Expand the {core} placeholder in the included test names (the loop
        # over cores building the final test list is partly elided).
        cores = element.get(u"core", None)
                for test in include:
                    tests.append(test.format(core=core))
        data = pd.Series(dtype="float64")
        for job, builds in element[u"data"].items():
            data[job] = pd.Series(dtype="float64")
            for build in builds:
                data[job][str(build)] = pd.Series(dtype="float64")
                    # Included test names are treated as regular expressions.
                    reg_ex = re.compile(str(test).lower())
                    for test_id in self.data[job][
                            str(build)][data_set].keys():
                        if re.match(reg_ex, str(test_id).lower()):
                            test_data = self.data[job][
                                str(build)][data_set][test_id]
                            data[job][str(build)][test_id] = \
                                pd.Series(dtype="float64")
                            # params is None -> copy all parameters.
                                for param, val in test_data.items():
                                    data[job][str(build)][test_id]\
                                for param in params:
                                        data[job][str(build)][
                                        # Missing parameter -> placeholder.
                                        data[job][str(build)][
                                            test_id][param] = u"No Data"
                            except KeyError as err:
                                if continue_on_error:
                                    logging.debug(repr(err))
                                logging.error(repr(err))
        except (KeyError, IndexError, ValueError) as err:
            f"Missing mandatory parameter in the element "
            f"specification: {repr(err)}"
        except AttributeError as err:
            logging.error(repr(err))
2060 def merge_data(data):
2061 """Merge data from more jobs and builds to a simple data structure.
2063 The output data structure is:
2065 - test (suite) 1 ID:
2071 - test (suite) n ID:
2074 :param data: Data to merge.
2075 :type data: pandas.Series
2076 :returns: Merged data.
2077 :rtype: pandas.Series
2080 logging.info(u" Merging data ...")
2082 merged_data = pd.Series(dtype="float64")
2083 for builds in data.values:
2084 for item in builds.values:
2085 for item_id, item_data in item.items():
2086 merged_data[item_id] = item_data
    def print_all_oper_data(self):
        """Print all operational data ("show runtime" output) to console.

        For every test with show-run data, decode the per-node runtime
        counters from JSON and print one pretty table per thread.
        """
        # NOTE(review): the try/except around loads(), several continue
        # statements, the else branches setting clocks/vectors_call to zero
        # and the per-thread totals are elided in this excerpt.
        for job in self._input_data.values:
            for build in job.values:
                for test_id, test_data in build[u"tests"].items():
                    if test_data.get(u"show-run", None) is None:
                    for dut_name, data in test_data[u"show-run"].items():
                        if data.get(u"runtime", None) is None:
                        # Runtime counters are stored as a JSON string.
                        runtime = loads(data[u"runtime"])
                            threads_nr = len(runtime[0][u"clocks"])
                        except (IndexError, KeyError):
                        threads = OrderedDict(
                            {idx: list() for idx in range(threads_nr)})
                        for item in runtime:
                            for idx in range(threads_nr):
                                # Cycles per unit of work: prefer vectors,
                                # then calls, then suspends as denominator.
                                if item[u"vectors"][idx] > 0:
                                    clocks = item[u"clocks"][idx] / \
                                        item[u"vectors"][idx]
                                elif item[u"calls"][idx] > 0:
                                    clocks = item[u"clocks"][idx] / \
                                elif item[u"suspends"][idx] > 0:
                                    clocks = item[u"clocks"][idx] / \
                                        item[u"suspends"][idx]
                                if item[u"calls"][idx] > 0:
                                    vectors_call = item[u"vectors"][idx] / \
                                # Keep only graph nodes that did any work.
                                if int(item[u"calls"][idx]) + int(
                                        item[u"vectors"][idx]) + \
                                        int(item[u"suspends"][idx]):
                                    threads[idx].append([
                                        item[u"calls"][idx],
                                        item[u"vectors"][idx],
                                        item[u"suspends"][idx],
                        print(f"Host IP: {data.get(u'host', '')}, "
                              f"Socket: {data.get(u'socket', '')}")
                        # One table per thread: thread 0 is "main", the
                        # others are workers.
                        for thread_nr, thread in threads.items():
                            txt_table = prettytable.PrettyTable(
                                    u"Cycles per Packet",
                                    u"Average Vector Size"
                                txt_table.add_row(row)
                            if len(thread) == 0:
                                avg = f", Average Vector Size per Node: " \
                                    f"{(avg / len(thread)):.2f}"
                            th_name = u"main" if thread_nr == 0 \
                                else f"worker_{thread_nr}"
                            print(f"{dut_name}, {th_name}{avg}")
                            txt_table.float_format = u".2"
                            txt_table.align = u"r"
                            txt_table.align[u"Name"] = u"l"
                            print(f"{txt_table.get_string()}\n")