1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
48 # Separator used in file names
class ExecutionChecker(ResultVisitor):
    """Traverse the Robot Framework test suite structure and extract data.

    The functionality implemented in this class generates a json structure
    with, roughly, this shape (the original docstring was truncated by
    extraction; field lists below are reconstructed from the visible code):

    Data:
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        }
        "suites": {
            "Suite long name": {
                "name": "Suite name",
                "doc": "Suite documentation",
                "parent": "Suite parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        }
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": e.g. "NDRPDR" | "CPS" | "MRR" | "BMRR" | "SOAK" | ...,
                "status": "PASS" | "FAIL",
                # type-specific result keys, e.g. for MRR/BMRR:
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            }
        }

    .. note:: ID is the lowercase full path to the test.
    """
# Compiled regular expressions used to pull results out of test messages
# and keyword outputs. NOTE(review): the extraction elided the closing
# parentheses of every re.compile(...) call and possibly whole constants
# (e.g. an old-style test-name regex); reconstructed here from the visible
# pattern lines — confirm against the original file.
REGEX_PLR_RATE = re.compile(
    r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
    r'PLRsearch upper bound::?\s(\d+.\d+)'
)
REGEX_NDRPDR_RATE = re.compile(
    r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
    r'NDR_UPPER:\s(\d+.\d+).*\n'
    r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
    r'PDR_UPPER:\s(\d+.\d+)'
)
REGEX_NDRPDR_GBPS = re.compile(
    r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
    r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
    r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
    r'PDR_UPPER:.*,\s(\d+.\d+)'
)
REGEX_PERF_MSG_INFO = re.compile(
    r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
    r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
    r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
)
REGEX_CPS_MSG_INFO = re.compile(
    r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
    r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
)
REGEX_PPS_MSG_INFO = re.compile(
    r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
    r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
)
REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

REGEX_VSAP_MSG_INFO = re.compile(
    r'Transfer Rate: (\d*.\d*).*\n'
    r'Latency: (\d*.\d*).*\n'
    r'Completed requests: (\d*).*\n'
    r'Failed requests: (\d*).*\n'
    r'Total data transferred: (\d*).*\n'
    r'Connection [cr]ps rate:\s*(\d*.\d*)'
)

# Needed for CPS and PPS tests
REGEX_NDRPDR_LAT_BASE = re.compile(
    r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
    r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
)
REGEX_NDRPDR_LAT = re.compile(
    r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
    r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
    r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
    r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
    r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
    r'Latency.*\[\'(.*)\', \'(.*)\'\]'
)

REGEX_VERSION_VPP = re.compile(
    r"(return STDOUT Version:\s*|"
    r"VPP Version:\s*|VPP version:\s*)(.*)"
)
REGEX_VERSION_DPDK = re.compile(
    r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
)
REGEX_TCP = re.compile(
    r'Total\s(rps|cps|throughput):\s(\d*).*$'
)
REGEX_MRR = re.compile(
    r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
    r'tx\s(\d*),\srx\s(\d*)'
)
REGEX_BMRR = re.compile(
    r'.*trial results.*: \[(.*)\]'
)
REGEX_RECONF_LOSS = re.compile(
    r'Packets lost due to reconfig: (\d*)'
)
REGEX_RECONF_TIME = re.compile(
    r'Implied time lost: (\d*.[\de-]*)'
)
REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

REGEX_SH_RUN_HOST = re.compile(
    r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
)
def __init__(self, metadata, mapping, ignore, process_oper):
    """Initialisation.

    NOTE(review): reconstructed from an elided extraction; attribute
    initialisations not visible in the fragment (e.g. _version, _test_id)
    are restored because later methods read them — confirm against the
    original file.

    :param metadata: Key-value pairs to be included in "metadata" part of
        JSON structure.
    :param mapping: Mapping of the old names of test cases to the new
        (actual) ones.
    :param ignore: List of TCs to be ignored.
    :param process_oper: If True, operational data (show run, telemetry) is
        processed.
    :type metadata: dict
    :type mapping: dict
    :type ignore: list
    :type process_oper: bool
    """

    # Type of message to parse out from the test messages
    self._msg_type = None

    # SUT version (VPP or DPDK), filled by the version getters
    self._version = None

    # Timestamp
    self._timestamp = None

    # Testbed. The testbed is identified by TG node IP address.
    self._testbed = None

    # Mapping of TCs long names
    self._mapping = mapping

    # Ignore list
    self._ignore = ignore

    self._process_oper = process_oper

    # Number of PAPI History messages found:
    # 0 - no message
    # 1 - PAPI History of DUT1
    # 2 - PAPI History of DUT2
    self._conf_history_lookup_nr = 0

    self._sh_run_counter = 0
    self._telemetry_kw_counter = 0
    self._telemetry_msg_counter = 0

    # Test ID of currently processed test - the lowercase full path to the
    # test
    self._test_id = None

    # The main data structure
    self._data = {
        u"metadata": OrderedDict(),
        u"suites": OrderedDict(),
        u"tests": OrderedDict()
    }

    # Save the provided metadata
    for key, val in metadata.items():
        self._data[u"metadata"][key] = val

    # Dictionary defining the methods used to parse different types of
    # messages
    self.parse_msg = {
        u"vpp-version": self._get_vpp_version,
        u"dpdk-version": self._get_dpdk_version,
        u"teardown-papi-history": self._get_papi_history,
        u"test-show-runtime": self._get_show_run,
        u"testbed": self._get_testbed,
        u"test-telemetry": self._get_telemetry
    }
@property
def data(self):
    """Getter - Data parsed from the XML file.

    :returns: Data parsed from the XML file.
    :rtype: dict
    """
    return self._data
def _get_data_from_mrr_test_msg(self, msg):
    """Get info from message of MRR performance tests.

    Formats the list of per-trial receive rates as "[x.xx, y.yy, ...]"
    scaled to millions.

    :param msg: Message to be processed.
    :type msg: str
    :returns: Processed message or u"Test Failed." if a problem occurs.
    :rtype: str
    """
    groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
    if not groups or groups.lastindex != 1:
        return u"Test Failed."

    try:
        data = groups.group(1).split(u", ")
    except (AttributeError, IndexError, ValueError, KeyError):
        return u"Test Failed."

    out_str = u"["
    try:
        for item in data:
            # Scale each trial result to millions (e.g. Mpps).
            out_str += f"{(float(item) / 1e6):.2f}, "
        # Drop the trailing ", " before closing the bracket.
        return out_str[:-2] + u"]"
    except (AttributeError, IndexError, ValueError, KeyError):
        return u"Test Failed."
def _get_data_from_cps_test_msg(self, msg):
    """Get info from message of NDRPDR CPS tests.

    :param msg: Message to be processed.
    :type msg: str
    :returns: Processed message or u"Test Failed." if a problem occurs.
    :rtype: str
    """
    groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
    if not groups or groups.lastindex != 2:
        return u"Test Failed."

    try:
        # 1. NDR lower bound, 2. PDR lower bound, both scaled to millions.
        return (
            f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
            f"2. {(float(groups.group(2)) / 1e6):5.2f}"
        )
    except (AttributeError, IndexError, ValueError, KeyError):
        return u"Test Failed."
def _get_data_from_pps_test_msg(self, msg):
    """Get info from message of NDRPDR PPS tests.

    :param msg: Message to be processed.
    :type msg: str
    :returns: Processed message or u"Test Failed." if a problem occurs.
    :rtype: str
    """
    groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
    if not groups or groups.lastindex != 4:
        return u"Test Failed."

    try:
        # Each line: packet rate scaled to millions, then bandwidth.
        return (
            f"1. {(float(groups.group(1)) / 1e6):5.2f} "
            f"{float(groups.group(2)):5.2f}\n"
            f"2. {(float(groups.group(3)) / 1e6):5.2f} "
            f"{float(groups.group(4)):5.2f}"
        )
    except (AttributeError, IndexError, ValueError, KeyError):
        return u"Test Failed."
# NOTE(review): This block is preserved byte-for-byte from a line-sampled,
# whitespace-stripped extraction (leading numbers are original line numbers;
# many lines, including the `try:` headers, dict/tuple openers and the
# `_process_lat` return paths, were elided). Too intricate to reconstruct
# safely — restore from the original file before use.
451 def _get_data_from_perf_test_msg(self, msg):
452 """Get info from message of NDRPDR performance tests.
454 :param msg: Message to be processed.
456 :returns: Processed message or "Test Failed." if a problem occurs.
460 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
461 if not groups or groups.lastindex != 10:
462 return u"Test Failed."
# Parsed bounds: NDR/PDR lower rates + bandwidths, then three latency pairs.
466 u"ndr_low": float(groups.group(1)),
467 u"ndr_low_b": float(groups.group(2)),
468 u"pdr_low": float(groups.group(3)),
469 u"pdr_low_b": float(groups.group(4)),
470 u"pdr_lat_90_1": groups.group(5),
471 u"pdr_lat_90_2": groups.group(6),
472 u"pdr_lat_50_1": groups.group(7),
473 u"pdr_lat_50_2": groups.group(8),
474 u"pdr_lat_10_1": groups.group(9),
475 u"pdr_lat_10_2": groups.group(10),
477 except (AttributeError, IndexError, ValueError, KeyError):
478 return u"Test Failed."
480 def _process_lat(in_str_1, in_str_2):
481 """Extract P50, P90 and P99 latencies or min, avg, max values from
484 :param in_str_1: Latency string for one direction produced by robot
486 :param in_str_2: Latency string for second direction produced by
490 :returns: Processed latency string or None if a problem occurs.
# Latency strings are min/avg/max/hdrh; the 4th field is a base64-encoded
# HdrHistogram blob ("=" padding restored below before decoding).
493 in_list_1 = in_str_1.split('/', 3)
494 in_list_2 = in_str_2.split('/', 3)
496 if len(in_list_1) != 4 and len(in_list_2) != 4:
499 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
501 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
502 except hdrh.codec.HdrLengthException:
505 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
507 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
508 except hdrh.codec.HdrLengthException:
511 if hdr_lat_1 and hdr_lat_2:
513 hdr_lat_1.get_value_at_percentile(50.0),
514 hdr_lat_1.get_value_at_percentile(90.0),
515 hdr_lat_1.get_value_at_percentile(99.0),
516 hdr_lat_2.get_value_at_percentile(50.0),
517 hdr_lat_2.get_value_at_percentile(90.0),
518 hdr_lat_2.get_value_at_percentile(99.0)
# Fallback path: use the plain min/avg/max integers of both directions.
524 int(in_list_1[0]), int(in_list_1[1]), int(in_list_1[2]),
525 int(in_list_2[0]), int(in_list_2[1]), int(in_list_2[2])
# -1 / 0 / 2^32-1 look like sentinel "no measurement" values — TODO confirm.
528 if item in (-1, 4294967295, 0):
534 f"1. {(data[u'ndr_low'] / 1e6):5.2f} "
535 f"{data[u'ndr_low_b']:5.2f}"
536 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f} "
537 f"{data[u'pdr_low_b']:5.2f}"
540 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
541 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
542 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
545 max_len = len(str(max((max(item) for item in latency))))
546 max_len = 4 if max_len < 4 else max_len
548 for idx, lat in enumerate(latency):
# NOTE(review): the ":68,160d" format specs below look like garbled
# extraction of a nested width spec (presumably f"{lat[0]:{max_len}d}",
# given max_len computed above) — verify against the original file.
553 f"{lat[0]:68,160d} "
554 f"{lat[1]:68,160d} "
555 f"{lat[2]:68,160d} "
556 f"{lat[3]:68,160d} "
557 f"{lat[4]:68,160d} "
558 f"{lat[5]:68,160d} "
563 except (AttributeError, IndexError, ValueError, KeyError):
564 return u"Test Failed."
def _get_testbed(self, msg):
    """Called when extraction of testbed IP is required.
    The testbed is identified by TG node IP address.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    if msg.message.count(u"Setup of TG node") or \
            msg.message.count(u"Setup of node TG host"):
        reg_tg_ip = re.compile(
            r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
        try:
            self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
        except (KeyError, ValueError, IndexError, AttributeError):
            pass
        finally:
            # Record whatever we have (possibly None) and stop parsing
            # further messages of this type.
            self._data[u"metadata"][u"testbed"] = self._testbed
            self._msg_type = None
def _get_vpp_version(self, msg):
    """Called when extraction of VPP version is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    if msg.message.count(u"return STDOUT Version:") or \
            msg.message.count(u"VPP Version:") or \
            msg.message.count(u"VPP version:"):
        self._version = str(
            re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
        )
        self._data[u"metadata"][u"version"] = self._version
        self._msg_type = None
def _get_dpdk_version(self, msg):
    """Called when extraction of DPDK version is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    if msg.message.count(u"DPDK Version:"):
        try:
            self._version = str(re.search(
                self.REGEX_VERSION_DPDK, msg.message).group(2))
            self._data[u"metadata"][u"version"] = self._version
        except IndexError:
            pass
        finally:
            self._msg_type = None
def _get_papi_history(self, msg):
    """Called when extraction of PAPI command history is required.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    if msg.message.count(u"PAPI command history:"):
        self._conf_history_lookup_nr += 1
        if self._conf_history_lookup_nr == 1:
            # First DUT: start with an empty history string.
            self._data[u"tests"][self._test_id][u"conf-history"] = str()
        else:
            # Stop parsing after the second DUT's history.
            self._msg_type = None
        # Strip the "<ip> PAPI command history:" prefix; normalise quotes.
        text = re.sub(
            r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
            u"",
            msg.message,
            count=1
        ).replace(u'"', u"'")
        self._data[u"tests"][self._test_id][u"conf-history"] += (
            f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
        )
def _get_show_run(self, msg):
    """Called when extraction of VPP operational data (output of CLI command
    Show Runtime) is required.

    NOTE(review): reconstructed from an elided extraction — the default
    values for host/sock and the exact shape of the stored dict were not
    fully visible; confirm against the original file.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    if not msg.message.count(u"stats runtime"):
        return

    # Process only the first (telemetry) keyword's output.
    if self._sh_run_counter > 1:
        return

    if u"show-run" not in self._data[u"tests"][self._test_id].keys():
        self._data[u"tests"][self._test_id][u"show-run"] = dict()

    groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
    if not groups:
        return
    try:
        host = groups.group(1)
    except (AttributeError, IndexError):
        host = u""
    try:
        sock = groups.group(2)
    except (AttributeError, IndexError):
        sock = u""

    # DUT key is derived from how many DUTs were already recorded.
    dut = u"dut{nr}".format(
        nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)

    self._data[u'tests'][self._test_id][u'show-run'][dut] = \
        copy.copy(
            {
                u"host": host,
                u"socket": sock,
                # Normalise the message into a JSON-ish string: drop
                # whitespace, convert quotes, strip bytes/unicode prefixes
                # and cut everything before the first colon.
                u"runtime": str(msg.message).replace(u' ', u'').
                            replace(u'\n', u'').replace(u"'", u'"').
                            replace(u'b"', u'"').replace(u'u"', u'"').
                            split(u":", 1)[1]
            }
        )
def _get_telemetry(self, msg):
    """Called when extraction of VPP telemetry data is required.

    NOTE(review): reconstructed from an elided extraction; the guard order
    and the per-line parsing loop body were partially elided — confirm
    against the original file.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    if self._telemetry_kw_counter > 1:
        return
    if not msg.message.count(u"# TYPE vpp_runtime_calls"):
        return

    if u"telemetry-show-run" not in \
            self._data[u"tests"][self._test_id].keys():
        self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()

    self._telemetry_msg_counter += 1
    groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
    if not groups:
        return
    try:
        host = groups.group(1)
    except (AttributeError, IndexError):
        host = u""
    try:
        sock = groups.group(2)
    except (AttributeError, IndexError):
        sock = u""
    runtime = {
        u"source_type": u"node",
        u"source_id": host,
        u"msg_type": u"metric",
        u"log_level": u"INFO",
        u"timestamp": msg.timestamp,
        u"msg": u"show_runtime",
        u"host": host,
        u"socket": sock,
        u"data": list()
    }
    for line in msg.message.splitlines():
        # Only Prometheus-style "vpp_runtime_*" metric lines are relevant.
        if not line.startswith(u"vpp_runtime_"):
            continue
        try:
            params, value, timestamp = line.rsplit(u" ", maxsplit=2)
            cut = params.index(u"{")
            name = params[:cut].split(u"_", maxsplit=2)[-1]
            # SECURITY NOTE(review): eval() on log content — the input is
            # CI-generated, but consider ast.literal_eval or a parser.
            labels = eval(
                u"dict" + params[cut:].replace('{', '(').replace('}', ')')
            )
            labels[u"graph_node"] = labels.pop(u"name")
            runtime[u"data"].append(
                {
                    u"name": name,
                    u"labels": labels,
                    u"value": value,
                    u"timestamp": timestamp
                }
            )
        except (TypeError, ValueError, IndexError):
            continue
    self._data[u'tests'][self._test_id][u'telemetry-show-run']\
        [f"dut{self._telemetry_msg_counter}"] = copy.copy(
            {
                u"host": host,
                u"socket": sock,
                u"runtime": runtime
            }
        )
def _get_ndrpdr_throughput(self, msg):
    """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
    message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """
    # -1.0 marks "not found"; status stays FAIL unless all groups parse.
    throughput = {
        u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
        u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
    }
    status = u"FAIL"
    groups = re.search(self.REGEX_NDRPDR_RATE, msg)

    if groups is not None:
        try:
            throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
            throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
            throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
            throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
            status = u"PASS"
        except (IndexError, ValueError):
            pass

    return throughput, status
def _get_ndrpdr_throughput_gbps(self, msg):
    """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
    test message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """
    # -1.0 marks "not found"; status stays FAIL unless all groups parse.
    gbps = {
        u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
        u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
    }
    status = u"FAIL"
    groups = re.search(self.REGEX_NDRPDR_GBPS, msg)

    if groups is not None:
        try:
            gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
            gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
            gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
            gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
            status = u"PASS"
        except (IndexError, ValueError):
            pass

    return gbps, status
def _get_plr_throughput(self, msg):
    """Get PLRsearch lower bound and PLRsearch upper bound from the test
    message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """
    # -1.0 marks "not found"; status stays FAIL unless both bounds parse.
    throughput = {
        u"LOWER": -1.0,
        u"UPPER": -1.0
    }
    status = u"FAIL"
    groups = re.search(self.REGEX_PLR_RATE, msg)

    if groups is not None:
        try:
            throughput[u"LOWER"] = float(groups.group(1))
            throughput[u"UPPER"] = float(groups.group(2))
            status = u"PASS"
        except (IndexError, ValueError):
            pass

    return throughput, status
def _get_ndrpdr_latency(self, msg):
    """Get LATENCY from the test message.

    :param msg: The test message to be parsed.
    :type msg: str
    :returns: Parsed data as a dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """
    # Per-direction default: -1.0 means "not measured", empty hdrh blob.
    latency_default = {
        u"min": -1.0, u"avg": -1.0, u"max": -1.0, u"hdrh": u""
    }
    latency = {
        u"NDR": {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        },
        u"PDR": {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        },
        u"LAT0": {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        },
        u"PDR10": {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        },
        u"PDR50": {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        },
        u"PDR90": {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        }
    }

    # Full latency output first; fall back to the base (NDR/PDR only)
    # format used by CPS and PPS tests.
    groups = re.search(self.REGEX_NDRPDR_LAT, msg)
    if groups is None:
        groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
    if groups is None:
        return latency, u"FAIL"

    def process_latency(in_str):
        """Return object with parsed latency values.

        TODO: Define class for the return type.

        :param in_str: Input string, min/avg/max/hdrh format.
        :type in_str: str
        :returns: Dict with corresponding keys, except hdrh float values.
        :rtype: dict
        :throws IndexError: If in_str does not have enough substrings.
        :throws ValueError: If a substring does not convert to float.
        """
        in_list = in_str.split('/', 3)

        rval = {
            u"min": float(in_list[0]),
            u"avg": float(in_list[1]),
            u"max": float(in_list[2]),
            u"hdrh": u""
        }

        if len(in_list) == 4:
            rval[u"hdrh"] = str(in_list[3])

        return rval

    try:
        latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
        latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
        latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
        latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
        if groups.lastindex == 4:
            # Base format: only NDR/PDR latencies available.
            return latency, u"PASS"
    except (IndexError, ValueError):
        pass

    try:
        latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
        latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
        latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
        latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
        latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
        latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
        latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
        latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
        if groups.lastindex == 12:
            return latency, u"PASS"
    except (IndexError, ValueError):
        pass

    return latency, u"FAIL"
@staticmethod
def _get_hoststack_data(msg, tags):
    """Get data from the hoststack test message.

    :param msg: The test message to be parsed.
    :param tags: Test tags.
    :type msg: str
    :type tags: list
    :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """
    result = dict()
    status = u"FAIL"

    # Normalise to strict JSON: double quotes, no spaces.
    msg = msg.replace(u"'", u'"').replace(u" ", u"")
    if u"LDPRELOAD" in tags:
        try:
            result = loads(msg)
            status = u"PASS"
        except JSONDecodeError:
            pass
    elif u"VPPECHO" in tags:
        try:
            # Two concatenated JSON objects: client stats, server stats.
            msg_lst = msg.replace(u"}{", u"} {").split(u" ")
            result = dict(
                client=loads(msg_lst[0]),
                server=loads(msg_lst[1])
            )
            status = u"PASS"
        except (JSONDecodeError, IndexError):
            pass

    return result, status
def _get_vsap_data(self, msg, tags):
    """Get data from the vsap test message.

    :param msg: The test message to be parsed.
    :param tags: Test tags.
    :type msg: str
    :type tags: list
    :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
    :rtype: tuple(dict, str)
    """
    result = dict()
    status = u"FAIL"

    groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
    if groups is not None:
        try:
            # Transfer rate is reported in kilo-units; scale to base units.
            result[u"transfer-rate"] = float(groups.group(1)) * 1e3
            result[u"latency"] = float(groups.group(2))
            result[u"completed-requests"] = int(groups.group(3))
            result[u"failed-requests"] = int(groups.group(4))
            result[u"bytes-transferred"] = int(groups.group(5))
            if u"TCP_CPS" in tags:
                result[u"cps"] = float(groups.group(6))
            elif u"TCP_RPS" in tags:
                result[u"rps"] = float(groups.group(6))
            else:
                # Neither CPS nor RPS tag: incomplete result, keep FAIL.
                return result, status
            status = u"PASS"
        except (IndexError, ValueError):
            pass

    return result, status
def visit_suite(self, suite):
    """Implements traversing through the suite and its direct children.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
    if self.start_suite(suite) is not False:
        suite.suites.visit(self)
        suite.tests.visit(self)
        self.end_suite(suite)
def start_suite(self, suite):
    """Called when suite starts.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
    try:
        parent_name = suite.parent.name
    except AttributeError:
        return

    # Suite key: lowercased long name with quotes and spaces normalised.
    self._data[u"suites"][suite.longname.lower().
                          replace(u'"', u"'").
                          replace(u" ", u"_")] = {
        u"name": suite.name.lower(),
        u"doc": suite.doc,
        u"parent": parent_name,
        u"level": len(suite.longname.split(u"."))
    }

    suite.keywords.visit(self)
def end_suite(self, suite):
    """Called when suite ends. Default implementation does nothing.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
def visit_test(self, test):
    """Implements traversing through the test.

    :param test: Test to process.
    :type test: Test
    :returns: Nothing.
    """
    if self.start_test(test) is not False:
        test.keywords.visit(self)
        self.end_test(test)
# NOTE(review): This block is preserved byte-for-byte from a line-sampled,
# whitespace-stripped extraction (leading numbers are original line numbers;
# many branch headers — else:, try:, logging calls, closing braces — were
# elided). Restore from the original file before use; only comments added.
1067 def start_test(self, test):
1068 """Called when test starts.
1070 :param test: Test to process.
# Per-test keyword/message counters reset for each new test.
1075 self._sh_run_counter = 0
1076 self._telemetry_kw_counter = 0
1077 self._telemetry_msg_counter = 0
1079 longname_orig = test.longname.lower()
1081 # Check the ignore list
1082 if longname_orig in self._ignore:
1085 tags = [str(tag) for tag in test.tags]
1086 test_result = dict()
1088 # Change the TC long name and name if defined in the mapping table
1089 longname = self._mapping.get(longname_orig, None)
1090 if longname is not None:
1091 name = longname.split(u'.')[-1]
1093 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1097 longname = longname_orig
1098 name = test.name.lower()
1100 # Remove TC number from the TC long name (backward compatibility):
1101 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1102 # Remove TC number from the TC name (not needed):
1103 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
# Basic metadata copied from the Robot test object.
1105 test_result[u"parent"] = test.parent.name.lower()
1106 test_result[u"tags"] = tags
1107 test_result["doc"] = test.doc
1108 test_result[u"type"] = u""
1109 test_result[u"status"] = test.status
1110 test_result[u"starttime"] = test.starttime
1111 test_result[u"endtime"] = test.endtime
# Message formatting depends on the test type (PPS/CPS/perf/MRR).
1113 if test.status == u"PASS":
1114 if u"NDRPDR" in tags:
1115 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1116 test_result[u"msg"] = self._get_data_from_pps_test_msg(
1118 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1119 test_result[u"msg"] = self._get_data_from_cps_test_msg(
1122 test_result[u"msg"] = self._get_data_from_perf_test_msg(
1124 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1125 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1128 test_result[u"msg"] = test.message
1130 test_result[u"msg"] = test.message
1132 if u"PERFTEST" in tags and u"TREX" not in tags:
1133 # Replace info about cores (e.g. -1c-) with the info about threads
1134 # and cores (e.g. -1t1c-) in the long test case names and in the
1135 # test case names if necessary.
1138 for tag in test_result[u"tags"]:
1139 groups = re.search(self.REGEX_TC_TAG, tag)
1145 self._test_id = re.sub(
1146 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1147 self._test_id, count=1
1149 test_result[u"name"] = re.sub(
1150 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1151 test_result["name"], count=1
# Missing or ambiguous multi-threading tag forces a FAIL record.
1154 test_result[u"status"] = u"FAIL"
1155 self._data[u"tests"][self._test_id] = test_result
1157 f"The test {self._test_id} has no or more than one "
1158 f"multi-threading tags.\n"
1159 f"Tags: {test_result[u'tags']}"
# Test type dispatch: each branch fills type-specific result fields.
1163 if u"DEVICETEST" in tags:
1164 test_result[u"type"] = u"DEVICETEST"
1165 elif u"NDRPDR" in tags:
1166 if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1167 test_result[u"type"] = u"CPS"
1169 test_result[u"type"] = u"NDRPDR"
1170 if test.status == u"PASS":
1171 test_result[u"throughput"], test_result[u"status"] = \
1172 self._get_ndrpdr_throughput(test.message)
1173 test_result[u"gbps"], test_result[u"status"] = \
1174 self._get_ndrpdr_throughput_gbps(test.message)
1175 test_result[u"latency"], test_result[u"status"] = \
1176 self._get_ndrpdr_latency(test.message)
1177 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1179 test_result[u"type"] = u"MRR"
1181 test_result[u"type"] = u"BMRR"
1182 if test.status == u"PASS":
1183 test_result[u"result"] = dict()
1184 groups = re.search(self.REGEX_BMRR, test.message)
1185 if groups is not None:
1186 items_str = groups.group(1)
1188 float(item.strip().replace(u"'", u""))
1189 for item in items_str.split(",")
1191 # Use whole list in CSIT-1180.
1192 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1193 test_result[u"result"][u"samples"] = items_float
1194 test_result[u"result"][u"receive-rate"] = stats.avg
1195 test_result[u"result"][u"receive-stdev"] = stats.stdev
# Fallback: legacy MRR message format (tx/rx over a fixed duration).
1197 groups = re.search(self.REGEX_MRR, test.message)
1198 test_result[u"result"][u"receive-rate"] = \
1199 float(groups.group(3)) / float(groups.group(1))
1200 elif u"SOAK" in tags:
1201 test_result[u"type"] = u"SOAK"
1202 if test.status == u"PASS":
1203 test_result[u"throughput"], test_result[u"status"] = \
1204 self._get_plr_throughput(test.message)
1205 elif u"HOSTSTACK" in tags:
1206 test_result[u"type"] = u"HOSTSTACK"
1207 if test.status == u"PASS":
1208 test_result[u"result"], test_result[u"status"] = \
1209 self._get_hoststack_data(test.message, tags)
1210 elif u"LDP_NGINX" in tags:
1211 test_result[u"type"] = u"LDP_NGINX"
1212 test_result[u"result"], test_result[u"status"] = \
1213 self._get_vsap_data(test.message, tags)
1214 # elif u"TCP" in tags: # This might be not used
1215 # test_result[u"type"] = u"TCP"
1216 # if test.status == u"PASS":
1217 # groups = re.search(self.REGEX_TCP, test.message)
1218 # test_result[u"result"] = int(groups.group(2))
1219 elif u"RECONF" in tags:
1220 test_result[u"type"] = u"RECONF"
1221 if test.status == u"PASS":
1222 test_result[u"result"] = None
1224 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1225 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1226 test_result[u"result"] = {
1227 u"loss": int(grps_loss.group(1)),
1228 u"time": float(grps_time.group(1))
1230 except (AttributeError, IndexError, ValueError, TypeError):
1231 test_result[u"status"] = u"FAIL"
# Unrecognised test type: record as FAIL.
1233 test_result[u"status"] = u"FAIL"
1235 self._data[u"tests"][self._test_id] = test_result
def end_test(self, test):
    """Called when test ends. Default implementation does nothing.

    :param test: Test to process.
    :type test: Test
    :returns: Nothing.
    """
def visit_keyword(self, keyword):
    """Implements traversing through the keyword and its child keywords.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    if self.start_keyword(keyword) is not False:
        self.end_keyword(keyword)
def start_keyword(self, keyword):
    """Called when keyword starts. Dispatches to the setup / teardown /
    test-keyword visitor based on the keyword type.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    try:
        if keyword.type == u"setup":
            self.visit_setup_kw(keyword)
        elif keyword.type == u"teardown":
            self.visit_teardown_kw(keyword)
        else:
            self.visit_test_kw(keyword)
    except AttributeError:
        pass
def end_keyword(self, keyword):
    """Called when keyword ends. Default implementation does nothing.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
def visit_test_kw(self, test_kw):
    """Implements traversing through the test keyword and its child
    keywords.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
    for keyword in test_kw.keywords:
        if self.start_test_kw(keyword) is not False:
            self.visit_test_kw(keyword)
            self.end_test_kw(keyword)
def start_test_kw(self, test_kw):
    """Called when test keyword starts. Sets the message type for
    operational-data keywords and visits their messages.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
    # Operational data is processed only when requested.
    if not self._process_oper:
        return

    if test_kw.name.count(u"Run Telemetry On All Duts"):
        self._msg_type = u"test-telemetry"
        self._telemetry_kw_counter += 1
    elif test_kw.name.count(u"Show Runtime On All Duts"):
        self._msg_type = u"test-show-runtime"
        self._sh_run_counter += 1
    else:
        return

    test_kw.messages.visit(self)
def end_test_kw(self, test_kw):
    """Called when keyword ends. Default implementation does nothing.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
def visit_setup_kw(self, setup_kw):
    """Implements traversing through the setup keyword and its child
    keywords.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
    for keyword in setup_kw.keywords:
        if self.start_setup_kw(keyword) is not False:
            self.visit_setup_kw(keyword)
            self.end_setup_kw(keyword)
def start_setup_kw(self, setup_kw):
    """Called when setup keyword starts. Sets the message type for
    version / testbed extraction keywords and visits their messages.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
    # Only look for a version / testbed while it is still unknown.
    if setup_kw.name.count(u"Show Vpp Version On All Duts") \
            and not self._version:
        self._msg_type = u"vpp-version"
    elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
            not self._version:
        self._msg_type = u"dpdk-version"
    elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
        self._msg_type = u"testbed"
    else:
        return

    setup_kw.messages.visit(self)
def end_setup_kw(self, setup_kw):
    """Called when keyword ends. Default implementation does nothing.

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
def visit_teardown_kw(self, teardown_kw):
    """Implements traversing through the teardown keyword and its child
    keywords.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
    for keyword in teardown_kw.keywords:
        if self.start_teardown_kw(keyword) is not False:
            self.visit_teardown_kw(keyword)
            self.end_teardown_kw(keyword)
def start_teardown_kw(self, teardown_kw):
    """Called when a teardown keyword starts.

    When the keyword shows the PAPI history, reset the lookup counter,
    switch the parser to the PAPI-history handler and visit the
    keyword's messages.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    """
    if teardown_kw.name.count(u"Show Papi History On All Duts"):
        self._conf_history_lookup_nr = 0
        self._msg_type = u"teardown-papi-history"
        teardown_kw.messages.visit(self)
def end_teardown_kw(self, teardown_kw):
    """Called when a teardown keyword ends. Deliberately a no-op hook.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    """
def visit_message(self, msg):
    """Implement visiting a message.

    :param msg: Message to process.
    :type msg: Message
    """
    if self.start_message(msg) is False:
        return
    self.end_message(msg)
def start_message(self, msg):
    """Called when a message starts.

    Route the message to the parser selected by the current message
    type; messages arriving while no parser is selected are dropped.

    :param msg: Message to process.
    :type msg: Message
    """
    if self._msg_type:
        self.parse_msg[self._msg_type](msg)
def end_message(self, msg):
    """Called when a message ends. Deliberately a no-op hook.

    :param msg: Message to process.
    :type msg: Message
    """
1429 The data is extracted from output.xml files generated by Jenkins jobs and
1430 stored in pandas' DataFrames.
1436 (as described in ExecutionChecker documentation)
1438 (as described in ExecutionChecker documentation)
1440 (as described in ExecutionChecker documentation)
def __init__(self, spec, for_output):
    """Initialisation.

    :param spec: Specification.
    :param for_output: Output to be generated from downloaded data.
    :type spec: Specification
    :type for_output: str
    """
    # Specification (configuration) object:
    self._cfg = spec
    self._for_output = for_output
    # Data store for all parsed jobs/builds:
    self._input_data = pd.Series()
@property
def data(self):
    """Getter - parsed input data.

    :returns: Input data.
    :rtype: pandas.Series
    """
    return self._input_data
def metadata(self, job, build):
    """Getter - metadata of the given job/build.

    :param job: Job which metadata we want.
    :param build: Build which metadata we want.
    :type job: str
    :type build: str
    :returns: Metadata
    :rtype: pandas.Series
    """
    return self.data[job][build][u"metadata"]
def suites(self, job, build):
    """Getter - suites of the given job/build.

    The build key is normalised to str before lookup.

    :param job: Job which suites we want.
    :param build: Build which suites we want.
    :type job: str
    :type build: str
    :returns: Suites.
    :rtype: pandas.Series
    """
    return self.data[job][str(build)][u"suites"]
1493 def tests(self, job, build):
1496 :param job: Job which tests we want.
1497 :param build: Build which tests we want.
1501 :rtype: pandas.Series
1503 return self.data[job][build][u"tests"]
def _parse_tests(self, job, build):
    """Process data from robot output.xml file and return JSON structured
    data.

    :param job: The name of job which build output data will be processed.
    :param build: The build which output data will be processed.
    :type job: str
    :type build: dict
    :returns: JSON data structure, or None if the file cannot be parsed.
    :rtype: dict
    """
    metadata = {
        u"job": job,
        u"build": build
    }

    with open(build[u"file-name"], u'r') as data_file:
        try:
            result = ExecutionResult(data_file)
        except errors.DataError as err:
            logging.error(
                f"Error occurred while parsing output.xml: {repr(err)}"
            )
            return None

    # Operational data (show runtime) is processed only for coverage jobs
    # and selected iterative jobs.
    process_oper = False
    if u"-vpp-perf-report-coverage-" in job:
        process_oper = True
    elif u"-vpp-perf-report-iterative-" in job:
        # Exceptions for TBs where we do not have coverage data:
        for item in (u"-2n-icx", ):
            if item in job:
                process_oper = True
                break

    checker = ExecutionChecker(
        metadata, self._cfg.mapping, self._cfg.ignore, process_oper
    )
    result.visit(checker)

    # Enrich metadata with overall statistics from the parsed result.
    checker.data[u"metadata"][u"tests_total"] = \
        result.statistics.total.all.total
    checker.data[u"metadata"][u"tests_passed"] = \
        result.statistics.total.all.passed
    checker.data[u"metadata"][u"tests_failed"] = \
        result.statistics.total.all.failed
    checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
    checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]

    return checker.data
def _download_and_parse_build(self, job, build, repeat, pid=10000):
    """Download and parse the input data file.

    :param job: Name of the Jenkins job which generated the processed input
        file.
    :param build: Information about the Jenkins build which generated the
        processed input file.
    :param repeat: Repeat the download specified number of times if not
        successful.
    :param pid: PID of the process executing this method.
    :type job: str
    :type build: dict
    :type repeat: int
    :type pid: int
    :returns: Result of the download/parse step.
    :rtype: dict
    """
    logging.info(f"Processing the job/build: {job}: {build[u'build']}")

    state = u"failed"
    success = False
    data = None
    do_repeat = repeat
    while do_repeat:
        success = download_and_unzip_data_file(self._cfg, job, build, pid)
        if success:
            break
        do_repeat -= 1
    if not success:
        logging.error(
            f"It is not possible to download the input data file from the "
            f"job {job}, build {build[u'build']}, or it is damaged. "
            f"Skipped."
        )
    if success:
        logging.info(f" Processing data from build {build[u'build']}")
        data = self._parse_tests(job, build)
        if data is None:
            logging.error(
                f"Input data file from the job {job}, build "
                f"{build[u'build']} is damaged. Skipped."
            )
        else:
            state = u"processed"

        try:
            remove(build[u"file-name"])
        except OSError as err:
            logging.error(
                f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
            )

    # If the time-period is defined in the specification file, remove all
    # files which are outside the time period.
    timeperiod = self._cfg.environment.get(u"time-period", None)
    if timeperiod and data:
        now = dt.utcnow()
        timeperiod = timedelta(int(timeperiod))
        metadata = data.get(u"metadata", None)
        if metadata:
            generated = metadata.get(u"generated", None)
            if generated:
                generated = dt.strptime(generated, u"%Y%m%d %H:%M")
                if (now - generated) > timeperiod:
                    # Remove the data and the file:
                    state = u"removed"
                    data = None
                    logging.info(
                        f" The build {job}/{build[u'build']} is "
                        f"outdated, will be removed."
                    )
    return {
        u"data": data,
        u"state": state,
        u"job": job,
        u"build": build
    }
def download_and_parse_data(self, repeat=1):
    """Download the input data files, parse input data from input files and
    store in pandas' Series.

    :param repeat: Repeat the download specified number of times if not
        successful.
    :type repeat: int
    """
    logging.info(u"Downloading and parsing input files ...")

    for job, builds in self._cfg.input.items():
        for build in builds:

            result = self._download_and_parse_build(job, build, repeat)
            build_nr = result[u"build"][u"build"]

            if result[u"data"]:
                data = result[u"data"]
                build_data = pd.Series({
                    u"metadata": pd.Series(
                        list(data[u"metadata"].values()),
                        index=list(data[u"metadata"].keys())
                    ),
                    u"suites": pd.Series(
                        list(data[u"suites"].values()),
                        index=list(data[u"suites"].keys())
                    ),
                    u"tests": pd.Series(
                        list(data[u"tests"].values()),
                        index=list(data[u"tests"].keys())
                    )
                })

                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                self._input_data[job][str(build_nr)] = build_data
                self._cfg.set_input_file_name(
                    job, build_nr, result[u"build"][u"file-name"]
                )
            self._cfg.set_input_state(job, build_nr, result[u"state"])

            # Log current peak memory usage (ru_maxrss is in kB on Linux).
            mem_alloc = \
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
            logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

    logging.info(u"Done.")

    # Fix: this was an f-string with no placeholders (lint F541);
    # a plain literal is equivalent.
    msg = u"Successful downloads from the sources:\n"
    for source in self._cfg.environment[u"data-sources"]:
        if source[u"successful-downloads"]:
            msg += (
                f"{source[u'url']}/{source[u'path']}/"
                f"{source[u'file-name']}: "
                f"{source[u'successful-downloads']}\n"
            )
    logging.info(msg)
def process_local_file(self, local_file, job=u"local", build_nr=1,
                       replace=True):
    """Process local XML file given as a command-line parameter.

    :param local_file: The file to process.
    :param job: Job name.
    :param build_nr: Build number.
    :param replace: If True, the information about jobs and builds is
        replaced by the new one, otherwise the new jobs and builds are
        added.
    :type local_file: str
    :type job: str
    :type build_nr: int
    :type replace: bool
    :raises: PresentationError if an error occurs.
    """
    if not isfile(local_file):
        raise PresentationError(f"The file {local_file} does not exist.")

    # Derive the build number from the file name when possible,
    # e.g. ".../3.xml" -> 3; otherwise keep the given build_nr.
    try:
        build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
    except (IndexError, ValueError):
        pass

    build = {
        u"build": build_nr,
        u"status": u"failed",
        u"file-name": local_file
    }
    if replace:
        self._cfg.input = dict()
    self._cfg.add_build(job, build)

    logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
    data = self._parse_tests(job, build)
    if data is None:
        raise PresentationError(
            f"Error occurred while parsing the file {local_file}"
        )

    build_data = pd.Series({
        u"metadata": pd.Series(
            list(data[u"metadata"].values()),
            index=list(data[u"metadata"].keys())
        ),
        u"suites": pd.Series(
            list(data[u"suites"].values()),
            index=list(data[u"suites"].keys())
        ),
        u"tests": pd.Series(
            list(data[u"tests"].values()),
            index=list(data[u"tests"].keys())
        )
    })

    if self._input_data.get(job, None) is None:
        self._input_data[job] = pd.Series()
    self._input_data[job][str(build_nr)] = build_data

    self._cfg.set_input_state(job, build_nr, u"processed")
def process_local_directory(self, local_dir, replace=True):
    """Process local directory with XML file(s). The directory is processed
    as a 'job' and the XML files in it as builds.
    If the given directory contains only sub-directories, these
    sub-directories are processed as jobs and the corresponding XML files
    as builds of that job.

    :param local_dir: Local directory to process.
    :param replace: If True, the information about jobs and builds is
        replaced by the new one, otherwise the new jobs and builds are
        added.
    :type local_dir: str
    :type replace: bool
    :raises: PresentationError if the directory is invalid.
    """
    if not isdir(local_dir):
        raise PresentationError(
            f"The directory {local_dir} does not exist."
        )

    # Check if the given directory includes only files, or only directories
    _, dirnames, filenames = next(walk(local_dir))

    if filenames and not dirnames:
        filenames.sort()
        # local_builds:
        # key: dir (job) name, value: list of file names (builds)
        local_builds = {
            local_dir: [join(local_dir, name) for name in filenames]
        }

    elif dirnames and not filenames:
        dirnames.sort()
        # local_builds:
        # key: dir (job) name, value: list of file names (builds)
        local_builds = dict()
        for dirname in dirnames:
            builds = [
                join(local_dir, dirname, name)
                for name in listdir(join(local_dir, dirname))
                if isfile(join(local_dir, dirname, name))
            ]
            if builds:
                local_builds[dirname] = sorted(builds)

    elif not filenames and not dirnames:
        raise PresentationError(f"The directory {local_dir} is empty.")
    else:
        raise PresentationError(
            f"The directory {local_dir} can include only files or only "
            f"directories, not both.\nThe directory {local_dir} includes "
            f"file(s):\n{filenames}\nand directories:\n{dirnames}"
        )

    if replace:
        self._cfg.input = dict()

    for job, files in local_builds.items():
        for idx, local_file in enumerate(files):
            self.process_local_file(local_file, job, idx + 1, replace=False)
1818 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1819 """Return the index of character in the string which is the end of tag.
1821 :param tag_filter: The string where the end of tag is being searched.
1822 :param start: The index where the searching is stated.
1823 :param closer: The character which is the tag closer.
1824 :type tag_filter: str
1827 :returns: The index of the tag closer.
1831 idx_opener = tag_filter.index(closer, start)
1832 return tag_filter.index(closer, idx_opener + 1)
@staticmethod
def _condition(tag_filter):
    """Create a conditional statement from the given tag filter.

    Each quoted tag 'TAG' in the filter is rewritten to
    'TAG' in tags so the resulting string can be evaluated against a
    tag list.

    :param tag_filter: Filter based on tags from the element specification.
    :type tag_filter: str
    :returns: Conditional statement which can be evaluated.
    :rtype: str
    """
    index = 0
    while True:
        index = InputData._end_of_tag(tag_filter, index)
        if index is None:
            return tag_filter
        index += 1
        tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
def filter_data(self, element, params=None, data=None, data_set=u"tests",
                continue_on_error=False):
    """Filter required data from the given jobs and builds.

    The output data structure is:
    - job 1
      - build 1
        - test (or suite) 1 ID:
          - param 1
          - ...
        - ...
      - ...
    - ...

    :param element: Element which will use the filtered data.
    :param params: Parameters which will be included in the output. If None,
        all parameters are included.
    :param data: If not None, this data is used instead of data specified
        in the element.
    :param data_set: The set of data to be filtered: tests, suites,
        metadata.
    :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then.
    :type element: pandas.Series
    :type params: list
    :type data: dict
    :type data_set: str
    :type continue_on_error: bool
    :returns: Filtered data.
    :rtype pandas.Series
    """
    try:
        if data_set == "suites":
            cond = u"True"
        elif element[u"filter"] in (u"all", u"template"):
            cond = u"True"
        else:
            cond = InputData._condition(element[u"filter"])
        logging.debug(f" Filter: {cond}")
    except KeyError:
        logging.error(u" No filter defined.")
        return None

    if params is None:
        params = element.get(u"parameters", None)
        if params:
            params.extend((u"type", u"status"))

    data_to_filter = data if data else element[u"data"]
    data = pd.Series()
    try:
        for job, builds in data_to_filter.items():
            data[job] = pd.Series()
            for build in builds:
                data[job][str(build)] = pd.Series()
                try:
                    data_dict = dict(
                        self.data[job][str(build)][data_set].items())
                except KeyError:
                    if continue_on_error:
                        continue
                    return None

                for test_id, test_data in data_dict.items():
                    if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                        data[job][str(build)][test_id] = pd.Series()
                        if params is None:
                            for param, val in test_data.items():
                                data[job][str(build)][test_id][param] = val
                        else:
                            for param in params:
                                try:
                                    data[job][str(build)][test_id][param] =\
                                        test_data[param]
                                except KeyError:
                                    data[job][str(build)][test_id][param] =\
                                        u"No Data"
        return data

    except (KeyError, IndexError, ValueError) as err:
        logging.error(
            f"Missing mandatory parameter in the element specification: "
            f"{repr(err)}"
        )
        return None
    except AttributeError as err:
        logging.error(repr(err))
        return None
    except SyntaxError as err:
        logging.error(
            f"The filter {cond} is not correct. Check if all tags are "
            f"enclosed by apostrophes.\n{repr(err)}"
        )
        return None
def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                         continue_on_error=False):
    """Filter required data from the given jobs and builds.

    The tests to be selected are listed in element[u"include"] as regular
    expressions; an optional element[u"core"] list expands each pattern
    once per core value via str.format(core=...).

    :param element: Element which will use the filtered data.
    :param params: Parameters which will be included in the output. If None,
        all parameters are included.
    :param data_set: The set of data to be filtered: tests, suites,
        metadata.
    :param continue_on_error: Continue if there is error while reading the
        data. The Item will be empty then.
    :type element: pandas.Series
    :type params: list
    :type data_set: str
    :type continue_on_error: bool
    :returns: Filtered data.
    :rtype pandas.Series
    """
    include = element.get(u"include", None)
    if not include:
        logging.warning(u"No tests to include, skipping the element.")
        return None

    if params is None:
        params = element.get(u"parameters", None)
        if params and u"type" not in params:
            params.append(u"type")

    cores = element.get(u"core", None)
    if cores:
        tests = list()
        for core in cores:
            for test in include:
                tests.append(test.format(core=core))
    else:
        tests = include

    data = pd.Series()
    try:
        for job, builds in element[u"data"].items():
            data[job] = pd.Series()
            for build in builds:
                data[job][str(build)] = pd.Series()
                for test in tests:
                    try:
                        reg_ex = re.compile(str(test).lower())
                        for test_id in self.data[job][
                                str(build)][data_set].keys():
                            if re.match(reg_ex, str(test_id).lower()):
                                test_data = self.data[job][
                                    str(build)][data_set][test_id]
                                data[job][str(build)][test_id] = pd.Series()
                                if params is None:
                                    for param, val in test_data.items():
                                        data[job][str(build)][test_id]\
                                            [param] = val
                                else:
                                    for param in params:
                                        try:
                                            data[job][str(build)][
                                                test_id][param] = \
                                                test_data[param]
                                        except KeyError:
                                            data[job][str(build)][
                                                test_id][param] = u"No Data"
                    except KeyError as err:
                        if continue_on_error:
                            logging.debug(repr(err))
                            continue
                        logging.error(repr(err))
                        return None
        return data

    except (KeyError, IndexError, ValueError) as err:
        logging.error(
            f"Missing mandatory parameter in the element "
            f"specification: {repr(err)}"
        )
        return None
    except AttributeError as err:
        logging.error(repr(err))
        return None
@staticmethod
def merge_data(data):
    """Merge data from more jobs and builds to a simple data structure.

    The output data structure is:
    - test (suite) 1 ID:
      - param 1
      - ...
    - test (suite) n ID:
      - ...

    Later builds overwrite earlier ones on item ID collisions.

    :param data: Data to merge.
    :type data: pandas.Series
    :returns: Merged data.
    :rtype: pandas.Series
    """
    logging.info(u" Merging data ...")

    merged_data = pd.Series()
    for builds in data.values:
        for item in builds.values:
            for item_id, item_data in item.items():
                merged_data[item_id] = item_data
    return merged_data
def print_all_oper_data(self):
    """Print all operational data to console.

    For every test with "show-run" data, decode the JSON runtime stats and
    print one table per thread (main / worker_N) per DUT.
    """
    for job in self._input_data.values:
        for build in job.values:
            for test_id, test_data in build[u"tests"].items():
                print(f"{test_id}")
                if test_data.get(u"show-run", None) is None:
                    continue
                for dut_name, data in test_data[u"show-run"].items():
                    if data.get(u"runtime", None) is None:
                        continue
                    runtime = loads(data[u"runtime"])
                    try:
                        threads_nr = len(runtime[0][u"clocks"])
                    except (IndexError, KeyError):
                        continue
                    threads = OrderedDict(
                        {idx: list() for idx in range(threads_nr)})
                    for item in runtime:
                        for idx in range(threads_nr):
                            # Cycles per unit of work; prefer vectors,
                            # then calls, then suspends.
                            if item[u"vectors"][idx] > 0:
                                clocks = item[u"clocks"][idx] / \
                                    item[u"vectors"][idx]
                            elif item[u"calls"][idx] > 0:
                                clocks = item[u"clocks"][idx] / \
                                    item[u"calls"][idx]
                            elif item[u"suspends"][idx] > 0:
                                clocks = item[u"clocks"][idx] / \
                                    item[u"suspends"][idx]
                            else:
                                clocks = 0.0

                            if item[u"calls"][idx] > 0:
                                vectors_call = item[u"vectors"][idx] / \
                                    item[u"calls"][idx]
                            else:
                                vectors_call = 0.0

                            # Keep only nodes that did any work.
                            if int(item[u"calls"][idx]) + int(
                                    item[u"vectors"][idx]) + \
                                    int(item[u"suspends"][idx]):
                                threads[idx].append([
                                    item[u"name"],
                                    item[u"calls"][idx],
                                    item[u"vectors"][idx],
                                    item[u"suspends"][idx],
                                    clocks,
                                    vectors_call
                                ])

                    print(f"Host IP: {data.get(u'host', '')}, "
                          f"Socket: {data.get(u'socket', '')}")
                    for thread_nr, thread in threads.items():
                        txt_table = prettytable.PrettyTable(
                            (
                                u"Name",
                                u"Nr of Vectors",
                                u"Nr of Packets",
                                u"Suspends",
                                u"Cycles per Packet",
                                u"Average Vector Size"
                            )
                        )
                        avg = 0.0
                        for row in thread:
                            txt_table.add_row(row)
                            avg += row[-1]
                        if len(thread) == 0:
                            avg = u""
                        else:
                            avg = f", Average Vector Size per Node: " \
                                f"{(avg / len(thread)):.2f}"
                        th_name = u"main" if thread_nr == 0 \
                            else f"worker_{thread_nr}"
                        print(f"{dut_name}, {th_name}{avg}")
                        txt_table.float_format = u".2"
                        txt_table.align = u"r"
                        txt_table.align[u"Name"] = u"l"
                        print(f"{txt_table.get_string()}\n")