1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in pandas' DataFrames,
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
48 # Separator used in file names
52 class ExecutionChecker(ResultVisitor):
53 """Class to traverse through the test suite structure.
55 The functionality implemented in this class generates a json structure:
61 "generated": "Timestamp",
62 "version": "SUT version",
63 "job": "Jenkins job name",
64 "build": "Information about the build"
67 "Suite long name 1": {
69 "doc": "Suite 1 documentation",
70 "parent": "Suite 1 parent",
71 "level": "Level of the suite in the suite hierarchy"
73 "Suite long name N": {
75 "doc": "Suite N documentation",
76 "parent": "Suite 2 parent",
77 "level": "Level of the suite in the suite hierarchy"
84 "parent": "Name of the parent of the test",
85 "doc": "Test documentation",
86 "msg": "Test message",
87 "conf-history": "DUT1 and DUT2 VAT History",
88 "show-run": "Show Run",
89 "tags": ["tag 1", "tag 2", "tag n"],
91 "status": "PASS" | "FAIL",
137 "parent": "Name of the parent of the test",
138 "doc": "Test documentation",
139 "msg": "Test message",
140 "tags": ["tag 1", "tag 2", "tag n"],
142 "status": "PASS" | "FAIL",
149 "parent": "Name of the parent of the test",
150 "doc": "Test documentation",
151 "msg": "Test message",
152 "tags": ["tag 1", "tag 2", "tag n"],
153 "type": "MRR" | "BMRR",
154 "status": "PASS" | "FAIL",
156 "receive-rate": float,
157 # Average of a list, computed using AvgStdevStats.
158 # In CSIT-1180, replace with List[float].
172 "metadata": { # Optional
173 "version": "VPP version",
174 "job": "Jenkins job name",
175 "build": "Information about the build"
179 "doc": "Suite 1 documentation",
180 "parent": "Suite 1 parent",
181 "level": "Level of the suite in the suite hierarchy"
184 "doc": "Suite N documentation",
185 "parent": "Suite 2 parent",
186 "level": "Level of the suite in the suite hierarchy"
192 "parent": "Name of the parent of the test",
193 "doc": "Test documentation"
194 "msg": "Test message"
195 "tags": ["tag 1", "tag 2", "tag n"],
196 "conf-history": "DUT1 and DUT2 VAT History"
197 "show-run": "Show Run"
198 "status": "PASS" | "FAIL"
206 .. note:: ID is the lowercase full path to the test.
209 REGEX_PLR_RATE = re.compile(
210 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
211 r'PLRsearch upper bound::?\s(\d+.\d+)'
213 REGEX_NDRPDR_RATE = re.compile(
214 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
215 r'NDR_UPPER:\s(\d+.\d+).*\n'
216 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
217 r'PDR_UPPER:\s(\d+.\d+)'
219 REGEX_NDRPDR_GBPS = re.compile(
220 r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
221 r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
222 r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
223 r'PDR_UPPER:.*,\s(\d+.\d+)'
225 REGEX_PERF_MSG_INFO = re.compile(
226 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
227 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
228 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
229 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
230 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
232 REGEX_CPS_MSG_INFO = re.compile(
233 r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
234 r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
236 REGEX_PPS_MSG_INFO = re.compile(
237 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
238 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
240 REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
242 REGEX_VSAP_MSG_INFO = re.compile(
243 r'Transfer Rate: (\d*.\d*).*\n'
244 r'Latency: (\d*.\d*).*\n'
245 r'Completed requests: (\d*).*\n'
246 r'Failed requests: (\d*).*\n'
247 r'Total data transferred: (\d*).*\n'
248 r'Connection [cr]ps rate:\s*(\d*.\d*)'
251 # Needed for CPS and PPS tests
252 REGEX_NDRPDR_LAT_BASE = re.compile(
253 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
254 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
256 REGEX_NDRPDR_LAT = re.compile(
257 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
258 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
259 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
260 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
261 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
262 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
265 REGEX_VERSION_VPP = re.compile(
266 r"(return STDOUT Version:\s*|"
267 r"VPP Version:\s*|VPP version:\s*)(.*)"
269 REGEX_VERSION_DPDK = re.compile(
270 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
272 REGEX_TCP = re.compile(
273 r'Total\s(rps|cps|throughput):\s(\d*).*$'
275 REGEX_MRR = re.compile(
276 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
277 r'tx\s(\d*),\srx\s(\d*)'
279 REGEX_BMRR = re.compile(
280 r'.*trial results.*: \[(.*)\]'
282 REGEX_RECONF_LOSS = re.compile(
283 r'Packets lost due to reconfig: (\d*)'
285 REGEX_RECONF_TIME = re.compile(
286 r'Implied time lost: (\d*.[\de-]*)'
288 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
290 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
292 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
294 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
296 REGEX_SH_RUN_HOST = re.compile(
297 r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
# NOTE(review): this is a line-sampled listing (embedded numbering is
# discontinuous); interior statements are missing from this body.
# Initialises parser state, copies caller-supplied metadata into the result
# structure, and builds the message-type -> parser-method dispatch table.
300 def __init__(self, metadata, mapping, ignore, process_oper):
303 :param metadata: Key-value pairs to be included in "metadata" part of
305 :param mapping: Mapping of the old names of test cases to the new
307 :param ignore: List of TCs to be ignored.
308 :param process_oper: If True, operational data (show run, telemetry) is
313 :type process_oper: bool
316 # Type of message to parse out from the test messages
317 self._msg_type = None
323 self._timestamp = None
325 # Testbed. The testbed is identified by TG node IP address.
328 # Mapping of TCs long names
329 self._mapping = mapping
332 self._ignore = ignore
334 self._process_oper = process_oper
336 # Number of PAPI History messages found:
338 # 1 - PAPI History of DUT1
339 # 2 - PAPI History of DUT2
340 self._conf_history_lookup_nr = 0
# Per-test counters reset in start_test(); guard against processing the
# same operational-data keyword more than once per test.
342 self._sh_run_counter = 0
343 self._telemetry_kw_counter = 0
344 self._telemetry_msg_counter = 0
346 # Test ID of currently processed test- the lowercase full path to the
350 # The main data structure
352 u"metadata": OrderedDict(),
353 u"suites": OrderedDict(),
354 u"tests": OrderedDict()
357 # Save the provided metadata
358 for key, val in metadata.items():
359 self._data[u"metadata"][key] = val
361 # Dictionary defining the methods used to parse different types of
# Keys match the self._msg_type values set by the start_*_kw() hooks.
364 u"vpp-version": self._get_vpp_version,
365 u"dpdk-version": self._get_dpdk_version,
366 u"teardown-papi-history": self._get_papi_history,
367 u"test-show-runtime": self._get_show_run,
368 u"testbed": self._get_testbed,
369 u"test-telemetry": self._get_telemetry
374 """Getter - Data parsed from the XML file.
376 :returns: Data parsed from the XML file.
# NOTE(review): line-sampled listing; interior statements (try:, loop header,
# out_str initialisation) are missing from this body.
381 def _get_data_from_mrr_test_msg(self, msg):
382 """Get info from message of MRR performance tests.
384 :param msg: Message to be processed.
386 :returns: Processed message or original message if a problem occurs.
# Expect exactly one captured group (the bracketed list of trial results).
390 groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
391 if not groups or groups.lastindex != 1:
392 return u"Test Failed."
395 data = groups.group(1).split(u", ")
396 except (AttributeError, IndexError, ValueError, KeyError):
397 return u"Test Failed."
# Values are divided by 1e6 — presumably pps -> Mpps; TODO confirm units.
402 out_str += f"{(float(item) / 1e6):.2f}, "
403 return out_str[:-2] + u"]"
404 except (AttributeError, IndexError, ValueError, KeyError):
405 return u"Test Failed."
# NOTE(review): line-sampled listing; interior statements are missing.
407 def _get_data_from_cps_test_msg(self, msg):
408 """Get info from message of NDRPDR CPS tests.
410 :param msg: Message to be processed.
412 :returns: Processed message or "Test Failed." if a problem occurs.
# Expect exactly two captured groups: NDR_LOWER and PDR_LOWER values.
416 groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
417 if not groups or groups.lastindex != 2:
418 return u"Test Failed."
# Scaled by 1e6 — presumably cps -> Mcps; TODO confirm units.
422 f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
423 f"2. {(float(groups.group(2)) / 1e6):5.2f}"
425 except (AttributeError, IndexError, ValueError, KeyError):
426 return u"Test Failed."
# NOTE(review): line-sampled listing; interior statements are missing.
428 def _get_data_from_pps_test_msg(self, msg):
429 """Get info from message of NDRPDR PPS tests.
431 :param msg: Message to be processed.
433 :returns: Processed message or "Test Failed." if a problem occurs.
# Expect four captured groups: NDR_LOWER (rate, bandwidth) and
# PDR_LOWER (rate, bandwidth).
437 groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
438 if not groups or groups.lastindex != 4:
439 return u"Test Failed."
# Rates divided by 1e6 (presumably pps -> Mpps); bandwidths kept as-is.
443 f"1. {(float(groups.group(1)) / 1e6):5.2f} "
444 f"{float(groups.group(2)):5.2f}\n"
445 f"2. {(float(groups.group(3)) / 1e6):5.2f} "
446 f"{float(groups.group(4)):5.2f}"
448 except (AttributeError, IndexError, ValueError, KeyError):
449 return u"Test Failed."
# NOTE(review): line-sampled listing; interior statements are missing from
# this body (try:, return statements of _process_lat, loop body, etc.).
451 def _get_data_from_perf_test_msg(self, msg):
452 """Get info from message of NDRPDR performance tests.
454 :param msg: Message to be processed.
456 :returns: Processed message or "Test Failed." if a problem occurs.
# Expect ten captured groups: NDR/PDR lower bounds with bandwidth plus
# three pairs of latency strings (90%/50%/10% PDR).
460 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
461 if not groups or groups.lastindex != 10:
462 return u"Test Failed."
466 u"ndr_low": float(groups.group(1)),
467 u"ndr_low_b": float(groups.group(2)),
468 u"pdr_low": float(groups.group(3)),
469 u"pdr_low_b": float(groups.group(4)),
470 u"pdr_lat_90_1": groups.group(5),
471 u"pdr_lat_90_2": groups.group(6),
472 u"pdr_lat_50_1": groups.group(7),
473 u"pdr_lat_50_2": groups.group(8),
474 u"pdr_lat_10_1": groups.group(9),
475 u"pdr_lat_10_2": groups.group(10),
477 except (AttributeError, IndexError, ValueError, KeyError):
478 return u"Test Failed."
# Nested helper: decodes per-direction latency strings, preferring hdrh
# percentile data over the plain min/avg/max fields.
480 def _process_lat(in_str_1, in_str_2):
481 """Extract P50, P90 and P99 latencies or min, avg, max values from
484 :param in_str_1: Latency string for one direction produced by robot
486 :param in_str_2: Latency string for second direction produced by
490 :returns: Processed latency string or None if a problem occurs.
493 in_list_1 = in_str_1.split('/', 3)
494 in_list_2 = in_str_2.split('/', 3)
496 if len(in_list_1) != 4 and len(in_list_2) != 4:
# Re-pad the base64 hdrh blob before decoding.
499 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
501 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
502 except hdrh.codec.HdrLengthException:
505 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
507 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
508 except hdrh.codec.HdrLengthException:
511 if hdr_lat_1 and hdr_lat_2:
513 hdr_lat_1.get_value_at_percentile(50.0),
514 hdr_lat_1.get_value_at_percentile(90.0),
515 hdr_lat_1.get_value_at_percentile(99.0),
516 hdr_lat_2.get_value_at_percentile(50.0),
517 hdr_lat_2.get_value_at_percentile(90.0),
518 hdr_lat_2.get_value_at_percentile(99.0)
# Fallback: plain min/avg/max integers for both directions.
524 int(in_list_1[0]), int(in_list_1[1]), int(in_list_1[2]),
525 int(in_list_2[0]), int(in_list_2[1]), int(in_list_2[2])
# Sentinel latency values treated as "no data" — presumably -1 and
# UINT32_MAX markers; TODO confirm.
528 if item in (-1, 4294967295, 0):
534 f"1. {(data[u'ndr_low'] / 1e6):5.2f} "
535 f"{data[u'ndr_low_b']:5.2f}"
536 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f} "
537 f"{data[u'pdr_low_b']:5.2f}"
540 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
541 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
542 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
545 max_len = len(str(max((max(item) for item in latency))))
546 max_len = 4 if max_len < 4 else max_len
548 for idx, lat in enumerate(latency):
# NOTE(review): the width spec ":68,156d" below looks garbled — it was
# presumably a computed width such as f"{lat[0]:{max_len}d}" (max_len is
# prepared just above and otherwise unused). Verify against upstream.
553 f"{lat[0]:68,156d} "
554 f"{lat[1]:68,156d} "
555 f"{lat[2]:68,156d} "
556 f"{lat[3]:68,156d} "
557 f"{lat[4]:68,156d} "
558 f"{lat[5]:68,156d} "
563 except (AttributeError, IndexError, ValueError, KeyError):
564 return u"Test Failed."
# NOTE(review): line-sampled listing; interior statements (try:, except body)
# are missing from this body.
566 def _get_testbed(self, msg):
567 """Called when extraction of testbed IP is required.
568 The testbed is identified by TG node IP address.
570 :param msg: Message to process.
575 if msg.message.count(u"Setup of TG node") or \
576 msg.message.count(u"Setup of node TG host"):
577 reg_tg_ip = re.compile(
578 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
580 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
581 except (KeyError, ValueError, IndexError, AttributeError):
# Record the testbed in metadata and stop parsing further messages of
# this type.
584 self._data[u"metadata"][u"testbed"] = self._testbed
585 self._msg_type = None
# NOTE(review): line-sampled listing; interior statements are missing.
587 def _get_vpp_version(self, msg):
588 """Called when extraction of VPP version is required.
590 :param msg: Message to process.
595 if msg.message.count(u"return STDOUT Version:") or \
596 msg.message.count(u"VPP Version:") or \
597 msg.message.count(u"VPP version:"):
# Group 2 of REGEX_VERSION_VPP is the version string after the label.
599 re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
601 self._data[u"metadata"][u"version"] = self._version
602 self._msg_type = None
603 logging.info(self._version)
# NOTE(review): line-sampled listing; interior statements (try/except around
# the search) are missing from this body.
605 def _get_dpdk_version(self, msg):
606 """Called when extraction of DPDK version is required.
608 :param msg: Message to process.
613 if msg.message.count(u"DPDK Version:"):
615 self._version = str(re.search(
616 self.REGEX_VERSION_DPDK, msg.message).group(2))
617 self._data[u"metadata"][u"version"] = self._version
621 self._msg_type = None
# NOTE(review): line-sampled listing; interior statements (the re.sub call
# wrapping line 637, text assignment) are missing from this body.
623 def _get_papi_history(self, msg):
624 """Called when extraction of PAPI command history is required.
626 :param msg: Message to process.
630 if msg.message.count(u"PAPI command history:"):
# Counter doubles as the DUT index (1 = DUT1, 2 = DUT2, ...).
631 self._conf_history_lookup_nr += 1
632 if self._conf_history_lookup_nr == 1:
633 self._data[u"tests"][self._test_id][u"conf-history"] = str()
635 self._msg_type = None
637 r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
641 ).replace(u'"', u"'")
642 self._data[u"tests"][self._test_id][u"conf-history"] += (
643 f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
# NOTE(review): line-sampled listing; interior statements (early returns,
# try:, the dict literal opening, trailing replace chain) are missing.
646 def _get_show_run(self, msg):
647 """Called when extraction of VPP operational data (output of CLI command
648 Show Runtime) is required.
650 :param msg: Message to process.
655 if not msg.message.count(u"stats runtime"):
# Process only the first Show Runtime invocation per test.
659 if self._sh_run_counter > 1:
662 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
663 self._data[u"tests"][self._test_id][u"show-run"] = dict()
# Extract DUT host IP and PAPI socket from the message prefix.
665 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
669 host = groups.group(1)
670 except (AttributeError, IndexError):
673 sock = groups.group(2)
674 except (AttributeError, IndexError):
# DUT key is positional: dut1, dut2, ... in order of appearance.
677 dut = u"dut{nr}".format(
678 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
680 self._data[u'tests'][self._test_id][u'show-run'][dut] = \
# Normalise the runtime blob into JSON-ish text (strip spaces/newlines,
# swap quote styles, drop b"/u" literal prefixes).
685 u"runtime": str(msg.message).replace(u' ', u'').
686 replace(u'\n', u'').replace(u"'", u'"').
687 replace(u'b"', u'"').replace(u'u"', u'"').
# NOTE(review): line-sampled listing; interior statements (early returns,
# the labels eval/parse step, dict literal boundaries) are missing.
692 def _get_telemetry(self, msg):
693 """Called when extraction of VPP telemetry data is required.
695 :param msg: Message to process.
# Process only the first telemetry keyword per test.
700 if self._telemetry_kw_counter > 1:
702 if not msg.message.count(u"# TYPE vpp_runtime_calls"):
705 if u"telemetry-show-run" not in \
706 self._data[u"tests"][self._test_id].keys():
707 self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()
709 self._telemetry_msg_counter += 1
710 groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
714 host = groups.group(1)
715 except (AttributeError, IndexError):
718 sock = groups.group(2)
719 except (AttributeError, IndexError):
# Skeleton record mimicking a show-runtime entry built from metric lines.
722 u"source_type": u"node",
724 u"msg_type": u"metric",
725 u"log_level": u"INFO",
726 u"timestamp": msg.timestamp,
727 u"msg": u"show_runtime",
# Parse Prometheus-style lines: name{labels} value timestamp.
732 for line in msg.message.splitlines():
733 if not line.startswith(u"vpp_runtime_"):
736 params, value, timestamp = line.rsplit(u" ", maxsplit=2)
737 cut = params.index(u"{")
738 name = params[:cut].split(u"_", maxsplit=2)[-1]
# NOTE(review): "dict(" + rewritten label text is presumably eval'ed to
# build the labels dict — verify; eval of log text would be a risk.
740 u"dict" + params[cut:].replace('{', '(').replace('}', ')')
742 labels[u"graph_node"] = labels.pop(u"name")
743 runtime[u"data"].append(
747 u"timestamp": timestamp,
751 except (TypeError, ValueError, IndexError):
753 self._data[u'tests'][self._test_id][u'telemetry-show-run']\
754 [f"dut{self._telemetry_msg_counter}"] = copy.copy(
# NOTE(review): line-sampled listing; interior statements (status init, dict
# opening, try:, status = PASS) are missing from this body.
762 def _get_ndrpdr_throughput(self, msg):
763 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
766 :param msg: The test message to be parsed.
768 :returns: Parsed data as a dict and the status (PASS/FAIL).
769 :rtype: tuple(dict, str)
# Defaults of -1.0 signal "not found" when the regex does not match.
773 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
774 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
777 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
779 if groups is not None:
781 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
782 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
783 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
784 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
786 except (IndexError, ValueError):
789 return throughput, status
# NOTE(review): line-sampled listing; interior statements are missing —
# including the final "return gbps, status" (cf. the sibling
# _get_ndrpdr_throughput at listing line 789), which is not visible here.
791 def _get_ndrpdr_throughput_gbps(self, msg):
792 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
795 :param msg: The test message to be parsed.
797 :returns: Parsed data as a dict and the status (PASS/FAIL).
798 :rtype: tuple(dict, str)
# Defaults of -1.0 signal "not found" when the regex does not match.
802 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
803 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
806 groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
808 if groups is not None:
810 gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
811 gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
812 gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
813 gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
815 except (IndexError, ValueError):
# NOTE(review): line-sampled listing; interior statements (throughput dict
# init, status init, try:, status = PASS) are missing from this body.
820 def _get_plr_throughput(self, msg):
821 """Get PLRsearch lower bound and PLRsearch upper bound from the test
824 :param msg: The test message to be parsed.
826 :returns: Parsed data as a dict and the status (PASS/FAIL).
827 :rtype: tuple(dict, str)
835 groups = re.search(self.REGEX_PLR_RATE, msg)
837 if groups is not None:
839 throughput[u"LOWER"] = float(groups.group(1))
840 throughput[u"UPPER"] = float(groups.group(2))
842 except (IndexError, ValueError):
845 return throughput, status
# NOTE(review): line-sampled listing; interior statements (latency_default
# definition, dict keys naming each section, try: lines, except bodies) are
# missing from this body.
847 def _get_ndrpdr_latency(self, msg):
848 """Get LATENCY from the test message.
850 :param msg: The test message to be parsed.
852 :returns: Parsed data as a dict and the status (PASS/FAIL).
853 :rtype: tuple(dict, str)
# Six direction-pair sections — presumably NDR, PDR, PDR90, PDR50, PDR10
# and LAT0, matching the keys assigned below; the key lines themselves
# are not visible in this sample.
863 u"direction1": copy.copy(latency_default),
864 u"direction2": copy.copy(latency_default)
867 u"direction1": copy.copy(latency_default),
868 u"direction2": copy.copy(latency_default)
871 u"direction1": copy.copy(latency_default),
872 u"direction2": copy.copy(latency_default)
875 u"direction1": copy.copy(latency_default),
876 u"direction2": copy.copy(latency_default)
879 u"direction1": copy.copy(latency_default),
880 u"direction2": copy.copy(latency_default)
883 u"direction1": copy.copy(latency_default),
884 u"direction2": copy.copy(latency_default)
# Prefer the full 12-group pattern; fall back to the 4-group base pattern.
888 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
890 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
892 return latency, u"FAIL"
894 def process_latency(in_str):
895 """Return object with parsed latency values.
897 TODO: Define class for the return type.
899 :param in_str: Input string, min/avg/max/hdrh format.
901 :returns: Dict with corresponding keys, except hdrh float values.
903 :throws IndexError: If in_str does not have enough substrings.
904 :throws ValueError: If a substring does not convert to float.
906 in_list = in_str.split('/', 3)
909 u"min": float(in_list[0]),
910 u"avg": float(in_list[1]),
911 u"max": float(in_list[2]),
# Optional fourth field is the raw hdrh histogram blob.
915 if len(in_list) == 4:
916 rval[u"hdrh"] = str(in_list[3])
921 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
922 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
923 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
924 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
925 if groups.lastindex == 4:
926 return latency, u"PASS"
927 except (IndexError, ValueError):
931 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
932 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
933 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
934 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
935 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
936 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
937 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
938 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
939 if groups.lastindex == 12:
940 return latency, u"PASS"
941 except (IndexError, ValueError):
944 return latency, u"FAIL"
# NOTE(review): line-sampled listing; interior statements (result/status
# init, try:, the LDPRELOAD loads() call, status assignments) are missing.
# No "self" parameter — presumably decorated @staticmethod on the (missing)
# preceding line; verify against upstream.
947 def _get_hoststack_data(msg, tags):
948 """Get data from the hoststack test message.
950 :param msg: The test message to be parsed.
951 :param tags: Test tags.
954 :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
955 :rtype: tuple(dict, str)
# Normalise quoting/whitespace so the message parses as JSON.
960 msg = msg.replace(u"'", u'"').replace(u" ", u"")
961 if u"LDPRELOAD" in tags:
965 except JSONDecodeError:
967 elif u"VPPECHO" in tags:
# VPP Echo emits two concatenated JSON objects: client then server.
969 msg_lst = msg.replace(u"}{", u"} {").split(u" ")
971 client=loads(msg_lst[0]),
972 server=loads(msg_lst[1])
975 except (JSONDecodeError, IndexError):
978 return result, status
# NOTE(review): line-sampled listing; interior statements (result/status
# init, try:, status = PASS) are missing from this body.
980 def _get_vsap_data(self, msg, tags):
981 """Get data from the vsap test message.
983 :param msg: The test message to be parsed.
984 :param tags: Test tags.
987 :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
988 :rtype: tuple(dict, str)
993 groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
994 if groups is not None:
# Transfer rate scaled by 1e3 — presumably kB/s -> B/s; TODO confirm.
996 result[u"transfer-rate"] = float(groups.group(1)) * 1e3
997 result[u"latency"] = float(groups.group(2))
998 result[u"completed-requests"] = int(groups.group(3))
999 result[u"failed-requests"] = int(groups.group(4))
1000 result[u"bytes-transferred"] = int(groups.group(5))
# NOTE(review): missing space in u"TCP_CPS"in — valid Python, but fix
# the spacing upstream for readability.
1001 if u"TCP_CPS"in tags:
1002 result[u"cps"] = float(groups.group(6))
1003 elif u"TCP_RPS" in tags:
1004 result[u"rps"] = float(groups.group(6))
1006 return result, status
1008 except (IndexError, ValueError):
1011 return result, status
# Standard ResultVisitor traversal hook: descend into child suites and tests
# unless start_suite() vetoes by returning False.
1013 def visit_suite(self, suite):
1014 """Implements traversing through the suite and its direct children.
1016 :param suite: Suite to process.
1020 if self.start_suite(suite) is not False:
1021 suite.suites.visit(self)
1022 suite.tests.visit(self)
1023 self.end_suite(suite)
# NOTE(review): line-sampled listing; interior statements (try:, the
# parent_name fallback, "doc" key) are missing from this body.
1025 def start_suite(self, suite):
1026 """Called when suite starts.
1028 :param suite: Suite to process.
1034 parent_name = suite.parent.name
1035 except AttributeError:
# Suite key: lowercased long name, quotes and spaces normalised.
1038 self._data[u"suites"][suite.longname.lower().
1039 replace(u'"', u"'").
1040 replace(u" ", u"_")] = {
1041 u"name": suite.name.lower(),
1043 u"parent": parent_name,
1044 u"level": len(suite.longname.split(u"."))
# Visit suite setup keywords (e.g. to pick up version/testbed messages).
1047 suite.setup.visit(self)
# No-op hook kept for the visitor contract; body not visible in this sample.
1049 def end_suite(self, suite):
1050 """Called when suite ends.
1052 :param suite: Suite to process.
# Traversal hook: process the test body unless start_test() returns False.
1057 def visit_test(self, test):
1058 """Implements traversing through the test.
1060 :param test: Test to process.
1064 if self.start_test(test) is not False:
1065 test.body.visit(self)
# NOTE(review): line-sampled listing; many interior statements (returns,
# else branches, dict literal boundaries, try: lines) are missing from this
# body. Comments below describe only the visible lines.
1068 def start_test(self, test):
1069 """Called when test starts.
1071 :param test: Test to process.
# Reset per-test operational-data counters.
1076 self._sh_run_counter = 0
1077 self._telemetry_kw_counter = 0
1078 self._telemetry_msg_counter = 0
1080 longname_orig = test.longname.lower()
1082 # Check the ignore list
1083 if longname_orig in self._ignore:
1086 tags = [str(tag) for tag in test.tags]
1087 test_result = dict()
1089 # Change the TC long name and name if defined in the mapping table
1090 longname = self._mapping.get(longname_orig, None)
1091 if longname is not None:
1092 name = longname.split(u'.')[-1]
1094 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1098 longname = longname_orig
1099 name = test.name.lower()
1101 # Remove TC number from the TC long name (backward compatibility):
1102 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1103 # Remove TC number from the TC name (not needed):
1104 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
# Basic test attributes copied from the Robot result object.
1106 test_result[u"parent"] = test.parent.name.lower()
1107 test_result[u"tags"] = tags
1108 test_result["doc"] = test.doc
1109 test_result[u"type"] = u""
1110 test_result[u"status"] = test.status
1111 test_result[u"starttime"] = test.starttime
1112 test_result[u"endtime"] = test.endtime
# Build the human-readable "msg" by test family.
1114 if test.status == u"PASS":
1115 if u"NDRPDR" in tags:
1116 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1117 test_result[u"msg"] = self._get_data_from_pps_test_msg(
1119 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1120 test_result[u"msg"] = self._get_data_from_cps_test_msg(
1123 test_result[u"msg"] = self._get_data_from_perf_test_msg(
1125 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1126 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1129 test_result[u"msg"] = test.message
1131 test_result[u"msg"] = test.message
1133 if u"PERFTEST" in tags and u"TREX" not in tags:
1134 # Replace info about cores (e.g. -1c-) with the info about threads
1135 # and cores (e.g. -1t1c-) in the long test case names and in the
1136 # test case names if necessary.
1139 for tag in test_result[u"tags"]:
1140 groups = re.search(self.REGEX_TC_TAG, tag)
1146 self._test_id = re.sub(
1147 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1148 self._test_id, count=1
1150 test_result[u"name"] = re.sub(
1151 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1152 test_result["name"], count=1
# Tests with zero or several NtMc tags are marked FAIL and skipped.
1155 test_result[u"status"] = u"FAIL"
1156 self._data[u"tests"][self._test_id] = test_result
1158 f"The test {self._test_id} has no or more than one "
1159 f"multi-threading tags.\n"
1160 f"Tags: {test_result[u'tags']}"
# Classify the test type from tags and extract type-specific results.
1164 if u"DEVICETEST" in tags:
1165 test_result[u"type"] = u"DEVICETEST"
1166 elif u"NDRPDR" in tags:
1167 if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1168 test_result[u"type"] = u"CPS"
1170 test_result[u"type"] = u"NDRPDR"
1171 if test.status == u"PASS":
1172 test_result[u"throughput"], test_result[u"status"] = \
1173 self._get_ndrpdr_throughput(test.message)
1174 test_result[u"gbps"], test_result[u"status"] = \
1175 self._get_ndrpdr_throughput_gbps(test.message)
1176 test_result[u"latency"], test_result[u"status"] = \
1177 self._get_ndrpdr_latency(test.message)
1178 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1180 test_result[u"type"] = u"MRR"
1182 test_result[u"type"] = u"BMRR"
1183 if test.status == u"PASS":
1184 test_result[u"result"] = dict()
1185 groups = re.search(self.REGEX_BMRR, test.message)
1186 if groups is not None:
1187 items_str = groups.group(1)
1189 float(item.strip().replace(u"'", u""))
1190 for item in items_str.split(",")
1192 # Use whole list in CSIT-1180.
1193 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1194 test_result[u"result"][u"samples"] = items_float
1195 test_result[u"result"][u"receive-rate"] = stats.avg
1196 test_result[u"result"][u"receive-stdev"] = stats.stdev
# Fallback for legacy MRR message format: rx pkts / duration.
1198 groups = re.search(self.REGEX_MRR, test.message)
1199 test_result[u"result"][u"receive-rate"] = \
1200 float(groups.group(3)) / float(groups.group(1))
1201 elif u"SOAK" in tags:
1202 test_result[u"type"] = u"SOAK"
1203 if test.status == u"PASS":
1204 test_result[u"throughput"], test_result[u"status"] = \
1205 self._get_plr_throughput(test.message)
1206 elif u"LDP_NGINX" in tags:
1207 test_result[u"type"] = u"LDP_NGINX"
1208 test_result[u"result"], test_result[u"status"] = \
1209 self._get_vsap_data(test.message, tags)
1210 elif u"HOSTSTACK" in tags:
1211 test_result[u"type"] = u"HOSTSTACK"
1212 if test.status == u"PASS":
1213 test_result[u"result"], test_result[u"status"] = \
1214 self._get_hoststack_data(test.message, tags)
1215 elif u"RECONF" in tags:
1216 test_result[u"type"] = u"RECONF"
1217 if test.status == u"PASS":
1218 test_result[u"result"] = None
1220 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1221 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1222 test_result[u"result"] = {
1223 u"loss": int(grps_loss.group(1)),
1224 u"time": float(grps_time.group(1))
1226 except (AttributeError, IndexError, ValueError, TypeError):
1227 test_result[u"status"] = u"FAIL"
1229 test_result[u"status"] = u"FAIL"
1231 self._data[u"tests"][self._test_id] = test_result
# No-op hook kept for the visitor contract; body not visible in this sample.
1233 def end_test(self, test):
1234 """Called when test ends.
1236 :param test: Test to process.
# Traversal hook for keywords; delegates to start/end hooks.
1241 def visit_keyword(self, keyword):
1242 """Implements traversing through the keyword and its child keywords.
1244 :param keyword: Keyword to process.
1245 :type keyword: Keyword
1248 if self.start_keyword(keyword) is not False:
1249 self.end_keyword(keyword)
# NOTE(review): line-sampled listing; the try:/else lines are missing.
# Dispatches to the setup/teardown/test keyword visitors by keyword.type.
1251 def start_keyword(self, keyword):
1252 """Called when keyword starts. Default implementation does nothing.
1254 :param keyword: Keyword to process.
1255 :type keyword: Keyword
1259 if keyword.type == u"setup":
1260 self.visit_setup_kw(keyword)
1261 elif keyword.type == u"teardown":
1262 self.visit_teardown_kw(keyword)
1264 self.visit_test_kw(keyword)
1265 except AttributeError:
# No-op hook kept for the visitor contract; body not visible in this sample.
1268 def end_keyword(self, keyword):
1269 """Called when keyword ends. Default implementation does nothing.
1271 :param keyword: Keyword to process.
1272 :type keyword: Keyword
# Recursive traversal of a test keyword's children.
1276 def visit_test_kw(self, test_kw):
1277 """Implements traversing through the test keyword and its child
1280 :param test_kw: Keyword to process.
1281 :type test_kw: Keyword
1284 for keyword in test_kw.body:
1285 if self.start_test_kw(keyword) is not False:
1286 self.visit_test_kw(keyword)
1287 self.end_test_kw(keyword)
# NOTE(review): line-sampled listing; the early return and else branch are
# missing from this body.
1289 def start_test_kw(self, test_kw):
1290 """Called when test keyword starts. Default implementation does
1293 :param test_kw: Keyword to process.
1294 :type test_kw: Keyword
# Skip operational-data collection entirely when not requested.
1297 if not self._process_oper:
# Arm the matching message parser and count invocations per test.
1300 if test_kw.name.count(u"Run Telemetry On All Duts"):
1301 self._msg_type = u"test-telemetry"
1302 self._telemetry_kw_counter += 1
1303 elif test_kw.name.count(u"Show Runtime On All Duts"):
1304 self._msg_type = u"test-show-runtime"
1305 self._sh_run_counter += 1
1308 test_kw.messages.visit(self)
# No-op hook kept for the visitor contract; body not visible in this sample.
1310 def end_test_kw(self, test_kw):
1311 """Called when keyword ends. Default implementation does nothing.
1313 :param test_kw: Keyword to process.
1314 :type test_kw: Keyword
# Recursive traversal of a setup keyword's children (docstring says
# "teardown" — likely a copy-paste slip in the original; verify upstream).
1318 def visit_setup_kw(self, setup_kw):
1319 """Implements traversing through the teardown keyword and its child
1322 :param setup_kw: Keyword to process.
1323 :type setup_kw: Keyword
1326 for keyword in setup_kw.body:
1327 if self.start_setup_kw(keyword) is not False:
1328 self.visit_setup_kw(keyword)
1329 self.end_setup_kw(keyword)
# NOTE(review): line-sampled listing; the condition continuation on the DPDK
# branch (the "not self._version" part, presumably) is missing.
1331 def start_setup_kw(self, setup_kw):
1332 """Called when teardown keyword starts. Default implementation does
1335 :param setup_kw: Keyword to process.
1336 :type setup_kw: Keyword
# Arm the version/testbed parsers only until a value has been captured.
1339 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1340 and not self._version:
1341 self._msg_type = u"vpp-version"
1342 elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1344 self._msg_type = u"dpdk-version"
1345 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1346 self._msg_type = u"testbed"
1349 setup_kw.messages.visit(self)
# No-op hook kept for the visitor contract; body not visible in this sample.
1351 def end_setup_kw(self, setup_kw):
1352 """Called when keyword ends. Default implementation does nothing.
1354 :param setup_kw: Keyword to process.
1355 :type setup_kw: Keyword
# Recursive traversal of a teardown keyword's children.
1359 def visit_teardown_kw(self, teardown_kw):
1360 """Implements traversing through the teardown keyword and its child
1363 :param teardown_kw: Keyword to process.
1364 :type teardown_kw: Keyword
1367 for keyword in teardown_kw.body:
1368 if self.start_teardown_kw(keyword) is not False:
1369 self.visit_teardown_kw(keyword)
1370 self.end_teardown_kw(keyword)
# Arms the PAPI-history parser and resets the DUT counter when the relevant
# teardown keyword is seen, then visits its messages.
1372 def start_teardown_kw(self, teardown_kw):
1373 """Called when teardown keyword starts
1375 :param teardown_kw: Keyword to process.
1376 :type teardown_kw: Keyword
1379 if teardown_kw.name.count(u"Show Papi History On All Duts"):
1380 self._conf_history_lookup_nr = 0
1381 self._msg_type = u"teardown-papi-history"
1382 teardown_kw.messages.visit(self)
def end_teardown_kw(self, teardown_kw):
    """Called when a teardown keyword ends. Default implementation does
    nothing.

    :param teardown_kw: Keyword to process.
    :type teardown_kw: Keyword
    :returns: Nothing.
    """
def visit_message(self, msg):
    """Implements visiting the message.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    # end_message() is the counterpart hook; skipped when start_message()
    # explicitly returns False.
    if self.start_message(msg) is not False:
        self.end_message(msg)
def start_message(self, msg):
    """Called when a message starts. Dispatches the message to the parser
    registered in self.parse_msg for the current message type
    (self._msg_type).

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    self.parse_msg[self._msg_type](msg)
def end_message(self, msg):
    """Called when a message ends. Default implementation does nothing.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
1425 The data is extracted from output.xml files generated by Jenkins jobs and
1426 stored in pandas' DataFrames.
1432 (as described in ExecutionChecker documentation)
1434 (as described in ExecutionChecker documentation)
1436 (as described in ExecutionChecker documentation)
def __init__(self, spec, for_output):
    """Initialization.

    :param spec: Specification.
    :param for_output: Output to be generated from downloaded data.
    :type spec: Specification
    :type for_output: str
    """
    # Specification - later used as self._cfg.mapping, self._cfg.ignore,
    # self._cfg.input, self._cfg.environment:
    self._cfg = spec
    # Output type the downloaded data is processed for:
    self._for_output = for_output
    # Data store: pandas Series keyed by job name, each holding per-build
    # Series of metadata/suites/tests.
    self._input_data = pd.Series(dtype="float64")
@property
def data(self):
    """Getter - Input data.

    :returns: Input data
    :rtype: pandas.Series
    """
    return self._input_data
def metadata(self, job, build):
    """Getter - metadata.

    :param job: Job which metadata we want.
    :param build: Build which metadata we want.
    :type job: str
    :type build: str
    :returns: Metadata.
    :rtype: pandas.Series
    """
    # NOTE(review): the build key is used as given (no str() coercion,
    # unlike suites()); callers are expected to pass it as a string.
    return self.data[job][build][u"metadata"]
def suites(self, job, build):
    """Getter - suites.

    :param job: Job which suites we want.
    :param build: Build which suites we want.
    :type job: str
    :type build: str
    :returns: Suites.
    :rtype: pandas.Series
    """
    # The build number is coerced to str - build data is keyed by the
    # stringified build number.
    return self.data[job][str(build)][u"suites"]
1489 def tests(self, job, build):
1492 :param job: Job which tests we want.
1493 :param build: Build which tests we want.
1497 :rtype: pandas.Series
1499 return self.data[job][build][u"tests"]
def _parse_tests(self, job, build):
    """Process data from robot output.xml file and return JSON structured
    data.

    :param job: The name of job which build output data will be processed.
    :param build: The build which output data will be processed.
    :type job: str
    :type build: dict
    :returns: JSON data structure, or None when output.xml is damaged.
    :rtype: dict or None
    """
    metadata = {
        u"job": job,
        u"build": build
    }

    with open(build[u"file-name"], u'r') as data_file:
        try:
            result = ExecutionResult(data_file)
        except errors.DataError as err:
            logging.error(
                f"Error occurred while parsing output.xml: {repr(err)}"
            )
            return None

    # Operational data (show runtime) is processed only for coverage jobs:
    process_oper = False
    if u"-vpp-perf-report-coverage-" in job:
        process_oper = True
    # elif u"-vpp-perf-report-iterative-" in job:
    #     # Exceptions for TBs where we do not have coverage data:
    #     for item in (u"-2n-icx", ):
    #         if item in job:
    #             process_oper = True
    #             break
    checker = ExecutionChecker(
        metadata, self._cfg.mapping, self._cfg.ignore, process_oper
    )
    result.visit(checker)

    # Enrich the metadata with the statistics collected by Robot:
    checker.data[u"metadata"][u"tests_total"] = \
        result.statistics.total.total
    checker.data[u"metadata"][u"tests_passed"] = \
        result.statistics.total.passed
    checker.data[u"metadata"][u"tests_failed"] = \
        result.statistics.total.failed
    checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
    checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]

    return checker.data
# NOTE(review): mangled extract - the leading integers on each line are
# original line numbers fused into the text and many lines are elided;
# code is kept byte-identical, comments only added.
# Visible behaviour: logs the job/build, downloads and unzips the input
# file via download_and_unzip_data_file(), parses it with
# self._parse_tests(), marks the state u"processed", removes the local
# file (OSError logged), and when the spec defines u"time-period",
# compares the parsed u"generated" timestamp (format u"%Y%m%d %H:%M")
# against that window to drop outdated builds.
1552 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1553 """Download and parse the input data file.
1555 :param pid: PID of the process executing this method.
1556 :param job: Name of the Jenkins job which generated the processed input
1558 :param build: Information about the Jenkins build which generated the
1559 processed input file.
1560 :param repeat: Repeat the download specified number of times if not
1568 logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1575 success = download_and_unzip_data_file(self._cfg, job, build, pid)
1581 f"It is not possible to download the input data file from the "
1582 f"job {job}, build {build[u'build']}, or it is damaged. "
1586 logging.info(f"  Processing data from build {build[u'build']}")
1587 data = self._parse_tests(job, build)
1590 f"Input data file from the job {job}, build "
1591 f"{build[u'build']} is damaged. Skipped."
1594 state = u"processed"
# The downloaded file is deleted after parsing; failure to delete is
# only logged, not fatal.
1597 remove(build[u"file-name"])
1598 except OSError as err:
1600 f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1603 # If the time-period is defined in the specification file, remove all
1604 # files which are outside the time period.
1606 timeperiod = self._cfg.environment.get(u"time-period", None)
1607 if timeperiod and data:
# u"time-period" is interpreted as a number of days (timedelta(int(...))).
1609 timeperiod = timedelta(int(timeperiod))
1610 metadata = data.get(u"metadata", None)
1612 generated = metadata.get(u"generated", None)
1614 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1615 if (now - generated) > timeperiod:
1616 # Remove the data and the file:
1621 f" The build {job}/{build[u'build']} is "
1622 f"outdated, will be removed."
# NOTE(review): mangled extract (line numbers fused, lines elided); code
# kept byte-identical, comments only added.
# Visible behaviour: iterates self._cfg.input jobs/builds, calls
# self._download_and_parse_build() per build, repacks the returned
# metadata/suites/tests dicts into pandas Series keyed by str(build_nr),
# records the input file name and state in the spec, logs peak RSS via
# resource.getrusage, and assembles a summary of successful downloads
# per data source.
1632 def download_and_parse_data(self, repeat=1):
1633 """Download the input data files, parse input data from input files and
1634 store in pandas' Series.
1636 :param repeat: Repeat the download specified number of times if not
1641 logging.info(u"Downloading and parsing input files ...")
1643 for job, builds in self._cfg.input.items():
1644 for build in builds:
1646 result = self._download_and_parse_build(job, build, repeat)
1649 build_nr = result[u"build"][u"build"]
1652 data = result[u"data"]
# Each build's data is wrapped in a nested pandas Series, preserving
# the metadata/suites/tests key order.
1653 build_data = pd.Series({
1654 u"metadata": pd.Series(
1655 list(data[u"metadata"].values()),
1656 index=list(data[u"metadata"].keys())
1658 u"suites": pd.Series(
1659 list(data[u"suites"].values()),
1660 index=list(data[u"suites"].keys())
1662 u"tests": pd.Series(
1663 list(data[u"tests"].values()),
1664 index=list(data[u"tests"].keys())
# Builds are stored under str(build_nr) within the job's Series.
1668 if self._input_data.get(job, None) is None:
1669 self._input_data[job] = pd.Series(dtype="float64")
1670 self._input_data[job][str(build_nr)] = build_data
1671 self._cfg.set_input_file_name(
1672 job, build_nr, result[u"build"][u"file-name"]
1674 self._cfg.set_input_state(job, build_nr, result[u"state"])
# ru_maxrss is in kB on Linux; divided by 1000 to report MB.
1677 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1678 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1680 logging.info(u"Done.")
1682 msg = f"Successful downloads from the sources:\n"
1683 for source in self._cfg.environment[u"data-sources"]:
1684 if source[u"successful-downloads"]:
1686 f"{source[u'url']}/{source[u'path']}/"
1687 f"{source[u'file-name']}: "
1688 f"{source[u'successful-downloads']}\n"
# NOTE(review): mangled extract (line numbers fused, lines elided); code
# kept byte-identical, comments only added.
# Visible behaviour: validates that the file exists (PresentationError
# otherwise), tries to derive build_nr from the file name (falling back
# on IndexError/ValueError), registers the build in the spec (clearing
# self._cfg.input first - presumably only when replace=True; the guard
# is elided here), parses with self._parse_tests() and stores the result
# as nested pandas Series under str(build_nr).
1692 def process_local_file(self, local_file, job=u"local", build_nr=1,
1694 """Process local XML file given as a command-line parameter.
1696 :param local_file: The file to process.
1697 :param job: Job name.
1698 :param build_nr: Build number.
1699 :param replace: If True, the information about jobs and builds is
1700 replaced by the new one, otherwise the new jobs and builds are
1702 :type local_file: str
1706 :raises: PresentationError if an error occurs.
1708 if not isfile(local_file):
1709 raise PresentationError(f"The file {local_file} does not exist.")
# Build number derived from the file name stem, e.g. ".../42.xml" -> 42.
1712 build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1713 except (IndexError, ValueError):
1718 u"status": u"failed",
1719 u"file-name": local_file
1722 self._cfg.input = dict()
1723 self._cfg.add_build(job, build)
1725 logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1726 data = self._parse_tests(job, build)
# _parse_tests() returns None for a damaged file - reported as an error.
1728 raise PresentationError(
1729 f"Error occurred while parsing the file {local_file}"
1732 build_data = pd.Series({
1733 u"metadata": pd.Series(
1734 list(data[u"metadata"].values()),
1735 index=list(data[u"metadata"].keys())
1737 u"suites": pd.Series(
1738 list(data[u"suites"].values()),
1739 index=list(data[u"suites"].keys())
1741 u"tests": pd.Series(
1742 list(data[u"tests"].values()),
1743 index=list(data[u"tests"].keys())
1747 if self._input_data.get(job, None) is None:
1748 self._input_data[job] = pd.Series(dtype="float64")
1749 self._input_data[job][str(build_nr)] = build_data
1751 self._cfg.set_input_state(job, build_nr, u"processed")
# NOTE(review): mangled extract (line numbers fused, lines elided); code
# kept byte-identical, comments only added.
# Visible behaviour: a directory of XML files is one job with each file
# a build; a directory of sub-directories is one job per sub-directory.
# Mixing files and directories, or an empty directory, raises
# PresentationError. Each discovered file is delegated to
# process_local_file() with replace=False.
1753 def process_local_directory(self, local_dir, replace=True):
1754 """Process local directory with XML file(s). The directory is processed
1755 as a 'job' and the XML files in it as builds.
1756 If the given directory contains only sub-directories, these
1757 sub-directories processed as jobs and corresponding XML files as builds
1760 :param local_dir: Local directory to process.
1761 :param replace: If True, the information about jobs and builds is
1762 replaced by the new one, otherwise the new jobs and builds are
1764 :type local_dir: str
1767 if not isdir(local_dir):
1768 raise PresentationError(
1769 f"The directory {local_dir} does not exist."
1772 # Check if the given directory includes only files, or only directories
1773 _, dirnames, filenames = next(walk(local_dir))
1775 if filenames and not dirnames:
1778 # key: dir (job) name, value: list of file names (builds)
1780 local_dir: [join(local_dir, name) for name in filenames]
1783 elif dirnames and not filenames:
1786 # key: dir (job) name, value: list of file names (builds)
1787 local_builds = dict()
1788 for dirname in dirnames:
1790 join(local_dir, dirname, name)
1791 for name in listdir(join(local_dir, dirname))
1792 if isfile(join(local_dir, dirname, name))
# Builds are sorted so build numbers are assigned deterministically.
1795 local_builds[dirname] = sorted(builds)
1797 elif not filenames and not dirnames:
1798 raise PresentationError(f"The directory {local_dir} is empty.")
1800 raise PresentationError(
1801 f"The directory {local_dir} can include only files or only "
1802 f"directories, not both.\nThe directory {local_dir} includes "
1803 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1807 self._cfg.input = dict()
# Build numbers are 1-based indexes within each job's sorted file list.
1809 for job, files in local_builds.items():
1810 for idx, local_file in enumerate(files):
1811 self.process_local_file(local_file, job, idx + 1, replace=False)
@staticmethod
def _end_of_tag(tag_filter, start=0, closer=u"'"):
    """Return the index of character in the string which is the end of
    tag.

    NOTE(review): the decorator/def lines and the try/except were elided
    in the extracted source; the signature is reconstructed from the
    docstring and the None-return from _condition()'s use of this helper.

    :param tag_filter: The string where the end of tag is being searched.
    :param start: The index where the searching is stated.
    :param closer: The character which is the tag closer.
    :type tag_filter: str
    :type start: int
    :type closer: str
    :returns: The index of the tag closer, or None when no tag is found.
    :rtype: int or None
    """
    try:
        idx_opener = tag_filter.index(closer, start)
        return tag_filter.index(closer, idx_opener + 1)
    except ValueError:
        # No (further) closer pair in the string - no more tags.
        return None
@staticmethod
def _condition(tag_filter):
    """Create a conditional statement from the given tag filter.

    Each tag enclosed in apostrophes gets u" in tags" appended after its
    closing apostrophe, e.g. "'NDRPDR' and 'IP4FWD'" becomes
    "'NDRPDR' in tags and 'IP4FWD' in tags".

    :param tag_filter: Filter based on tags from the element
        specification.
    :type tag_filter: str
    :returns: Conditional statement which can be evaluated.
    :rtype: str
    """
    index = 0
    while True:
        index = InputData._end_of_tag(tag_filter, index)
        if index is None:
            return tag_filter
        index += 1
        tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
# NOTE(review): mangled extract (line numbers fused, lines elided); code
# kept byte-identical, comments only added.
# Visible behaviour: turns the element's tag filter into a Python
# expression via InputData._condition(), then for every job/build in
# the element's (or the explicit) data evaluates it with eval() against
# each test's tags; matching tests are copied - all parameters, or only
# those listed in u"parameters" (extended with u"type" and u"status").
# The filter string comes from the specification file (trusted input),
# which is why eval() is tolerable here; SyntaxError in the filter is
# reported explicitly.
1849 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1850 continue_on_error=False):
1851 """Filter required data from the given jobs and builds.
1853 The output data structure is:
1856 - test (or suite) 1 ID:
1862 - test (or suite) n ID:
1869 :param element: Element which will use the filtered data.
1870 :param params: Parameters which will be included in the output. If None,
1871 all parameters are included.
1872 :param data: If not None, this data is used instead of data specified
1874 :param data_set: The set of data to be filtered: tests, suites,
1876 :param continue_on_error: Continue if there is error while reading the
1877 data. The Item will be empty then
1878 :type element: pandas.Series
1882 :type continue_on_error: bool
1883 :returns: Filtered data.
1884 :rtype pandas.Series
1888 if data_set == "suites":
1890 elif element[u"filter"] in (u"all", u"template"):
1893 cond = InputData._condition(element[u"filter"])
1894 logging.debug(f" Filter: {cond}")
1896 logging.error(u" No filter defined.")
1900 params = element.get(u"parameters", None)
1902 params.extend((u"type", u"status"))
1904 data_to_filter = data if data else element[u"data"]
1905 data = pd.Series(dtype="float64")
1907 for job, builds in data_to_filter.items():
1908 data[job] = pd.Series(dtype="float64")
1909 for build in builds:
1910 data[job][str(build)] = pd.Series(dtype="float64")
1913 self.data[job][str(build)][data_set].items())
1915 if continue_on_error:
# The eval environment exposes only the test's tags.
1919 for test_id, test_data in data_dict.items():
1920 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1921 data[job][str(build)][test_id] = \
1922 pd.Series(dtype="float64")
1924 for param, val in test_data.items():
1925 data[job][str(build)][test_id][param] = val
1927 for param in params:
1929 data[job][str(build)][test_id][param] =\
1932 data[job][str(build)][test_id][param] =\
1936 except (KeyError, IndexError, ValueError) as err:
1938 f"Missing mandatory parameter in the element specification: "
1942 except AttributeError as err:
1943 logging.error(repr(err))
1945 except SyntaxError as err:
1947 f"The filter {cond} is not correct. Check if all tags are "
1948 f"enclosed by apostrophes.\n{repr(err)}"
# NOTE(review): mangled extract (line numbers fused, lines elided); code
# kept byte-identical, comments only added.
# Visible behaviour: selects tests whose IDs match the (lower-cased)
# regular expressions built from the element's u"include" list -
# optionally expanded per u"core" via str.format(core=...) - and copies
# either all parameters or those listed in u"parameters" (u"type" is
# always appended), filling u"No Data" for missing ones; KeyError is
# logged and either skipped (continue_on_error) or propagated.
1952 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1953 continue_on_error=False):
1954 """Filter required data from the given jobs and builds.
1956 The output data structure is:
1959 - test (or suite) 1 ID:
1965 - test (or suite) n ID:
1972 :param element: Element which will use the filtered data.
1973 :param params: Parameters which will be included in the output. If None,
1974 all parameters are included.
1975 :param data_set: The set of data to be filtered: tests, suites,
1977 :param continue_on_error: Continue if there is error while reading the
1978 data. The Item will be empty then
1979 :type element: pandas.Series
1982 :type continue_on_error: bool
1983 :returns: Filtered data.
1984 :rtype pandas.Series
1987 include = element.get(u"include", None)
1989 logging.warning(u"No tests to include, skipping the element.")
1993 params = element.get(u"parameters", None)
1994 if params and u"type" not in params:
1995 params.append(u"type")
1997 cores = element.get(u"core", None)
# Each include pattern is expanded once per configured core count.
2001 for test in include:
2002 tests.append(test.format(core=core))
2006 data = pd.Series(dtype="float64")
2008 for job, builds in element[u"data"].items():
2009 data[job] = pd.Series(dtype="float64")
2010 for build in builds:
2011 data[job][str(build)] = pd.Series(dtype="float64")
# Test IDs are matched case-insensitively by lower-casing both sides.
2014 reg_ex = re.compile(str(test).lower())
2015 for test_id in self.data[job][
2016 str(build)][data_set].keys():
2017 if re.match(reg_ex, str(test_id).lower()):
2018 test_data = self.data[job][
2019 str(build)][data_set][test_id]
2020 data[job][str(build)][test_id] = \
2021 pd.Series(dtype="float64")
2023 for param, val in test_data.items():
2024 data[job][str(build)][test_id]\
2027 for param in params:
2029 data[job][str(build)][
2033 data[job][str(build)][
2034 test_id][param] = u"No Data"
2035 except KeyError as err:
2036 if continue_on_error:
2037 logging.debug(repr(err))
2039 logging.error(repr(err))
2043 except (KeyError, IndexError, ValueError) as err:
2045 f"Missing mandatory parameter in the element "
2046 f"specification: {repr(err)}"
2049 except AttributeError as err:
2050 logging.error(repr(err))
@staticmethod
def merge_data(data):
    """Merge data from more jobs and builds to a simple data structure.

    The output data structure is:
    - test (suite) 1 ID:
      - param 1
      - ...
      - param n
    - ...
    - test (suite) n ID:
      - ...

    Later jobs/builds overwrite earlier ones when item IDs collide.

    :param data: Data to merge.
    :type data: pandas.Series
    :returns: Merged data.
    :rtype: pandas.Series
    """
    logging.info(u" Merging data ...")

    merged_data = pd.Series(dtype="float64")
    for builds in data.values:
        for item in builds.values:
            for item_id, item_data in item.items():
                merged_data[item_id] = item_data
    return merged_data
# NOTE(review): mangled extract (line numbers fused, lines elided); code
# kept byte-identical, comments only added.
# Visible behaviour: walks all parsed tests' u"show-run" data, decodes
# the JSON u"runtime" blob, groups the per-node counters by thread index,
# derives clocks per vector/call/suspend and average vector size, and
# prints one prettytable per DUT thread (u"main" for thread 0,
# f"worker_{n}" otherwise) to the console.
2083 def print_all_oper_data(self):
2084 """Print all operational data to console.
2087 for job in self._input_data.values:
2088 for build in job.values:
2089 for test_id, test_data in build[u"tests"].items():
2091 if test_data.get(u"show-run", None) is None:
2093 for dut_name, data in test_data[u"show-run"].items():
2094 if data.get(u"runtime", None) is None:
2096 runtime = loads(data[u"runtime"])
# The thread count is taken from the first node's clocks vector.
2098 threads_nr = len(runtime[0][u"clocks"])
2099 except (IndexError, KeyError):
2101 threads = OrderedDict(
2102 {idx: list() for idx in range(threads_nr)})
2103 for item in runtime:
2104 for idx in range(threads_nr):
# Clocks are normalized per vector, else per call, else per suspend.
2105 if item[u"vectors"][idx] > 0:
2106 clocks = item[u"clocks"][idx] / \
2107 item[u"vectors"][idx]
2108 elif item[u"calls"][idx] > 0:
2109 clocks = item[u"clocks"][idx] / \
2111 elif item[u"suspends"][idx] > 0:
2112 clocks = item[u"clocks"][idx] / \
2113 item[u"suspends"][idx]
2117 if item[u"calls"][idx] > 0:
2118 vectors_call = item[u"vectors"][idx] / \
# Nodes with no activity (calls+vectors+suspends == 0) are skipped.
2123 if int(item[u"calls"][idx]) + int(
2124 item[u"vectors"][idx]) + \
2125 int(item[u"suspends"][idx]):
2126 threads[idx].append([
2128 item[u"calls"][idx],
2129 item[u"vectors"][idx],
2130 item[u"suspends"][idx],
2135 print(f"Host IP: {data.get(u'host', '')}, "
2136 f"Socket: {data.get(u'socket', '')}")
2137 for thread_nr, thread in threads.items():
2138 txt_table = prettytable.PrettyTable(
2144 u"Cycles per Packet",
2145 u"Average Vector Size"
2150 txt_table.add_row(row)
2152 if len(thread) == 0:
2155 avg = f", Average Vector Size per Node: " \
2156 f"{(avg / len(thread)):.2f}"
2157 th_name = u"main" if thread_nr == 0 \
2158 else f"worker_{thread_nr}"
2159 print(f"{dut_name}, {th_name}{avg}")
2160 txt_table.float_format = u".2"
2161 txt_table.align = u"r"
2162 txt_table.align[u"Name"] = u"l"
2163 print(f"{txt_table.get_string()}\n")