1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
48 # Separator used in file names
# NOTE(review): This SOURCE is a mangled excerpt — each line carries its
# original file line number and many original lines are elided (the embedded
# numbering jumps). Text below is kept byte-identical; only comments are added.
# ExecutionChecker subclasses Robot Framework's ResultVisitor and, while
# traversing a parsed output.xml, builds the nested "metadata"/"suites"/"tests"
# structure sketched in the (fragmentary) docstring that follows.
52 class ExecutionChecker(ResultVisitor):
53 """Class to traverse through the test suite structure.
55 The functionality implemented in this class generates a json structure:
61 "generated": "Timestamp",
62 "version": "SUT version",
63 "job": "Jenkins job name",
64 "build": "Information about the build"
67 "Suite long name 1": {
69 "doc": "Suite 1 documentation",
70 "parent": "Suite 1 parent",
71 "level": "Level of the suite in the suite hierarchy"
73 "Suite long name N": {
75 "doc": "Suite N documentation",
76 "parent": "Suite 2 parent",
77 "level": "Level of the suite in the suite hierarchy"
84 "parent": "Name of the parent of the test",
85 "doc": "Test documentation",
86 "msg": "Test message",
87 "conf-history": "DUT1 and DUT2 VAT History",
88 "show-run": "Show Run",
89 "tags": ["tag 1", "tag 2", "tag n"],
91 "status": "PASS" | "FAIL",
137 "parent": "Name of the parent of the test",
138 "doc": "Test documentation",
139 "msg": "Test message",
140 "tags": ["tag 1", "tag 2", "tag n"],
142 "status": "PASS" | "FAIL",
149 "parent": "Name of the parent of the test",
150 "doc": "Test documentation",
151 "msg": "Test message",
152 "tags": ["tag 1", "tag 2", "tag n"],
153 "type": "MRR" | "BMRR",
154 "status": "PASS" | "FAIL",
156 "receive-rate": float,
157 # Average of a list, computed using AvgStdevStats.
158 # In CSIT-1180, replace with List[float].
172 "metadata": { # Optional
173 "version": "VPP version",
174 "job": "Jenkins job name",
175 "build": "Information about the build"
179 "doc": "Suite 1 documentation",
180 "parent": "Suite 1 parent",
181 "level": "Level of the suite in the suite hierarchy"
184 "doc": "Suite N documentation",
185 "parent": "Suite 2 parent",
186 "level": "Level of the suite in the suite hierarchy"
192 "parent": "Name of the parent of the test",
193 "doc": "Test documentation"
194 "msg": "Test message"
195 "tags": ["tag 1", "tag 2", "tag n"],
196 "conf-history": "DUT1 and DUT2 VAT History"
197 "show-run": "Show Run"
198 "status": "PASS" | "FAIL"
206 .. note:: ID is the lowercase full path to the test.
# Class-level compiled regexes used to scrape numeric results out of Robot
# test/keyword message text. NOTE(review): throughout these patterns the
# decimal point in e.g. r'\d+.\d+' is an UNESCAPED '.', so it matches any
# character; presumably a literal dot was intended — TODO confirm before
# tightening, since existing messages may rely on the looser match.
209 REGEX_PLR_RATE = re.compile(
210 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
211 r'PLRsearch upper bound::?\s(\d+.\d+)'
# NDR/PDR lower/upper bounds in packet rate units (groups 1-4).
213 REGEX_NDRPDR_RATE = re.compile(
214 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
215 r'NDR_UPPER:\s(\d+.\d+).*\n'
216 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
217 r'PDR_UPPER:\s(\d+.\d+)'
# Same four bounds, but the value AFTER the comma on each line (Gbps figure).
219 REGEX_NDRPDR_GBPS = re.compile(
220 r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
221 r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
222 r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
223 r'PDR_UPPER:.*,\s(\d+.\d+)'
# Full NDRPDR message: 4 rate/bandwidth floats + 3 latency pairs (10 groups).
225 REGEX_PERF_MSG_INFO = re.compile(
226 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
227 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
228 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
229 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
230 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
232 REGEX_CPS_MSG_INFO = re.compile(
233 r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
234 r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
236 REGEX_PPS_MSG_INFO = re.compile(
237 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
238 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
# Captures the comma-separated trial list inside the last [...] of the message.
240 REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
242 REGEX_VSAP_MSG_INFO = re.compile(
243 r'Transfer Rate: (\d*.\d*).*\n'
244 r'Latency: (\d*.\d*).*\n'
245 r'Completed requests: (\d*).*\n'
246 r'Failed requests: (\d*).*\n'
247 r'Total data transferred: (\d*).*\n'
248 r'Connection [cr]ps rate:\s*(\d*.\d*)'
251 # Needed for CPS and PPS tests
252 REGEX_NDRPDR_LAT_BASE = re.compile(
253 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
254 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
# Extended latency block: 2 LATENCY pairs + 4 extra "Latency" pairs (12 groups).
256 REGEX_NDRPDR_LAT = re.compile(
257 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
258 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
259 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
260 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
261 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
262 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
# Version strings: group(2) is the version text after any of the prefixes.
265 REGEX_VERSION_VPP = re.compile(
266 r"(return STDOUT Version:\s*|"
267 r"VPP Version:\s*|VPP version:\s*)(.*)"
269 REGEX_VERSION_DPDK = re.compile(
270 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
272 REGEX_TCP = re.compile(
273 r'Total\s(rps|cps|throughput):\s(\d*).*$'
275 REGEX_MRR = re.compile(
276 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
277 r'tx\s(\d*),\srx\s(\d*)'
279 REGEX_BMRR = re.compile(
280 r'.*trial results.*: \[(.*)\]'
282 REGEX_RECONF_LOSS = re.compile(
283 r'Packets lost due to reconfig: (\d*)'
285 REGEX_RECONF_TIME = re.compile(
286 r'Implied time lost: (\d*.[\de-]*)'
# Tag like "2t1c" (threads/cores); name fragment like "-1c-"; "tcNN-" prefix.
288 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
290 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
292 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
294 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
296 REGEX_SH_RUN_HOST = re.compile(
297 r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
# Initialize parser state. NOTE(review): several original lines are elided
# here (e.g. the assignments around lines 317-326 and the dict literals at
# 350 and 362); comments below describe only what the visible lines show.
300 def __init__(self, metadata, mapping, ignore, for_output):
303 :param metadata: Key-value pairs to be included in "metadata" part of
305 :param mapping: Mapping of the old names of test cases to the new
307 :param ignore: List of TCs to be ignored.
308 :param for_output: Output to be generated from downloaded data.
312 :type for_output: str
315 # Type of message to parse out from the test messages
316 self._msg_type = None
322 self._timestamp = None
324 # Testbed. The testbed is identified by TG node IP address.
327 # Mapping of TCs long names
328 self._mapping = mapping
331 self._ignore = ignore
333 self._for_output = for_output
335 # Number of PAPI History messages found:
337 # 1 - PAPI History of DUT1
338 # 2 - PAPI History of DUT2
339 self._conf_history_lookup_nr = 0
# Per-test counters reset in start_test(); they gate duplicate show-run /
# telemetry keyword processing.
341 self._sh_run_counter = 0
342 self._telemetry_kw_counter = 0
343 self._telemetry_msg_counter = 0
345 # Test ID of currently processed test- the lowercase full path to the
349 # The main data structure
351 u"metadata": OrderedDict(),
352 u"suites": OrderedDict(),
353 u"tests": OrderedDict()
356 # Save the provided metadata
357 for key, val in metadata.items():
358 self._data[u"metadata"][key] = val
360 # Dictionary defining the methods used to parse different types of
# Dispatch table: start_message() routes each message to the handler keyed
# by the current self._msg_type.
363 u"vpp-version": self._get_vpp_version,
364 u"dpdk-version": self._get_dpdk_version,
365 u"teardown-papi-history": self._get_papi_history,
366 u"test-show-runtime": self._get_show_run,
367 u"testbed": self._get_testbed,
368 u"test-telemetry": self._get_telemetry
# NOTE(review): the 'def data(self)' / '@property' lines for this getter are
# elided from the excerpt; only its docstring fragment is visible.
373 """Getter - Data parsed from the XML file.
375 :returns: Data parsed from the XML file.
# Format an MRR message: extract the bracketed trial list and render each
# sample scaled by 1e6 to two decimals, e.g. "[1.23, 4.56]".
380 def _get_data_from_mrr_test_msg(self, msg):
381 """Get info from message of MRR performance tests.
383 :param msg: Message to be processed.
385 :returns: Processed message or original message if a problem occurs.
389 groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
390 if not groups or groups.lastindex != 1:
391 return u"Test Failed."
# NOTE(review): the enclosing 'try:' lines (orig. 393, 398-400) are elided.
394 data = groups.group(1).split(u", ")
395 except (AttributeError, IndexError, ValueError, KeyError):
396 return u"Test Failed."
401 out_str += f"{(float(item) / 1e6):.2f}, "
402 return out_str[:-2] + u"]"
403 except (AttributeError, IndexError, ValueError, KeyError):
404 return u"Test Failed."
# CPS variant: expects exactly 2 captured floats (NDR_LOWER / PDR_LOWER
# connection rates) and renders them scaled by 1e6.
406 def _get_data_from_cps_test_msg(self, msg):
407 """Get info from message of NDRPDR CPS tests.
409 :param msg: Message to be processed.
411 :returns: Processed message or "Test Failed." if a problem occurs.
415 groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
416 if not groups or groups.lastindex != 2:
417 return u"Test Failed."
421 f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
422 f"2. {(float(groups.group(2)) / 1e6):5.2f}"
424 except (AttributeError, IndexError, ValueError, KeyError):
425 return u"Test Failed."
# PPS variant: expects 4 groups — rate (scaled by 1e6) and bandwidth for
# each of NDR_LOWER and PDR_LOWER.
427 def _get_data_from_pps_test_msg(self, msg):
428 """Get info from message of NDRPDR PPS tests.
430 :param msg: Message to be processed.
432 :returns: Processed message or "Test Failed." if a problem occurs.
436 groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
437 if not groups or groups.lastindex != 4:
438 return u"Test Failed."
442 f"1. {(float(groups.group(1)) / 1e6):5.2f} "
443 f"{float(groups.group(2)):5.2f}\n"
444 f"2. {(float(groups.group(3)) / 1e6):5.2f} "
445 f"{float(groups.group(4)):5.2f}"
447 except (AttributeError, IndexError, ValueError, KeyError):
448 return u"Test Failed."
# Full NDRPDR message formatter: parses 10 regex groups (rates, bandwidths,
# three latency string pairs), decodes hdrh-encoded latency histograms to
# P50/P90/P99, and renders an aligned text table.
450 def _get_data_from_perf_test_msg(self, msg):
451 """Get info from message of NDRPDR performance tests.
453 :param msg: Message to be processed.
455 :returns: Processed message or "Test Failed." if a problem occurs.
459 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
460 if not groups or groups.lastindex != 10:
461 return u"Test Failed."
465 u"ndr_low": float(groups.group(1)),
466 u"ndr_low_b": float(groups.group(2)),
467 u"pdr_low": float(groups.group(3)),
468 u"pdr_low_b": float(groups.group(4)),
469 u"pdr_lat_90_1": groups.group(5),
470 u"pdr_lat_90_2": groups.group(6),
471 u"pdr_lat_50_1": groups.group(7),
472 u"pdr_lat_50_2": groups.group(8),
473 u"pdr_lat_10_1": groups.group(9),
474 u"pdr_lat_10_2": groups.group(10),
476 except (AttributeError, IndexError, ValueError, KeyError):
477 return u"Test Failed."
# Nested helper; takes two "min/avg/max/hdrh" strings (one per direction).
479 def _process_lat(in_str_1, in_str_2):
480 """Extract P50, P90 and P99 latencies or min, avg, max values from
483 :param in_str_1: Latency string for one direction produced by robot
485 :param in_str_2: Latency string for second direction produced by
489 :returns: Processed latency string or None if a problem occurs.
492 in_list_1 = in_str_1.split('/', 3)
493 in_list_2 = in_str_2.split('/', 3)
495 if len(in_list_1) != 4 and len(in_list_2) != 4:
# Re-pad base64: hdrh strings lose their '=' padding in transit.
# NOTE(review): 'len % 4' pads to a MULTIPLE-plus-remainder, not to a
# multiple of 4 — correct padding would be (-len) % 4; TODO confirm
# against the strings actually emitted by the robot jobs.
498 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
500 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
501 except hdrh.codec.HdrLengthException:
504 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
506 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
507 except hdrh.codec.HdrLengthException:
510 if hdr_lat_1 and hdr_lat_2:
512 hdr_lat_1.get_value_at_percentile(50.0),
513 hdr_lat_1.get_value_at_percentile(90.0),
514 hdr_lat_1.get_value_at_percentile(99.0),
515 hdr_lat_2.get_value_at_percentile(50.0),
516 hdr_lat_2.get_value_at_percentile(90.0),
517 hdr_lat_2.get_value_at_percentile(99.0)
# Fallback path: use the raw min/avg/max fields from both directions.
523 in_list_1[0], in_list_1[1], in_list_1[2],
524 in_list_2[0], in_list_2[1], in_list_2[2]
# Sentinel values meaning "no measurement" for this item.
527 if item in (u"-1", u"4294967295", u"0"):
535 f"1. {(data[u'ndr_low'] / 1e6):5.2f} "
536 f"{data[u'ndr_low_b']:5.2f}"
537 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f} "
538 f"{data[u'pdr_low_b']:5.2f}"
541 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
542 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
543 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
# Column width = widest latency value, minimum 4.
546 max_len = len(str(max((max(item) for item in latency))))
547 max_len = 4 if max_len < 4 else max_len
549 for idx, lat in enumerate(latency):
# NOTE(review): the format spec ':68,104d' below looks like excerpt
# mangling — presumably the original interpolated max_len into the
# width (e.g. ':{max_len},d'); verify against the upstream file.
554 f"{lat[0]:68,104d} "
555 f"{lat[1]:68,104d} "
556 f"{lat[2]:68,104d} "
557 f"{lat[3]:68,104d} "
558 f"{lat[4]:68,104d} "
559 f"{lat[5]:68,104d} "
564 except (AttributeError, IndexError, ValueError, KeyError):
565 return u"Test Failed."
# Message handlers dispatched via the parse table built in __init__. Each
# handler inspects msg.message, stores what it finds (testbed IP, SUT
# version, PAPI history) and clears self._msg_type so later messages in the
# same keyword are not re-parsed.
567 def _get_testbed(self, msg):
568 """Called when extraction of testbed IP is required.
569 The testbed is identified by TG node IP address.
571 :param msg: Message to process.
576 if msg.message.count(u"Setup of TG node") or \
577 msg.message.count(u"Setup of node TG host"):
578 reg_tg_ip = re.compile(
579 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
581 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
582 except (KeyError, ValueError, IndexError, AttributeError):
585 self._data[u"metadata"][u"testbed"] = self._testbed
586 self._msg_type = None
588 def _get_vpp_version(self, msg):
589 """Called when extraction of VPP version is required.
591 :param msg: Message to process.
# Accepts any of the three historical version-line prefixes.
596 if msg.message.count(u"return STDOUT Version:") or \
597 msg.message.count(u"VPP Version:") or \
598 msg.message.count(u"VPP version:"):
600 re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
602 self._data[u"metadata"][u"version"] = self._version
603 self._msg_type = None
605 def _get_dpdk_version(self, msg):
606 """Called when extraction of DPDK version is required.
608 :param msg: Message to process.
613 if msg.message.count(u"DPDK Version:"):
615 self._version = str(re.search(
616 self.REGEX_VERSION_DPDK, msg.message).group(2))
617 self._data[u"metadata"][u"version"] = self._version
621 self._msg_type = None
# Accumulates one "**DUTn:** ..." section per PAPI-history message into the
# current test's "conf-history" entry; counter distinguishes DUT1/DUT2.
623 def _get_papi_history(self, msg):
624 """Called when extraction of PAPI command history is required.
626 :param msg: Message to process.
630 if msg.message.count(u"PAPI command history:"):
631 self._conf_history_lookup_nr += 1
632 if self._conf_history_lookup_nr == 1:
633 self._data[u"tests"][self._test_id][u"conf-history"] = str()
635 self._msg_type = None
637 r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
641 ).replace(u'"', u"'")
642 self._data[u"tests"][self._test_id][u"conf-history"] += (
643 f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
# Extracts "show runtime" output per DUT into the current test's "show-run"
# dict; DUT key is derived from how many entries already exist.
646 def _get_show_run(self, msg):
647 """Called when extraction of VPP operational data (output of CLI command
648 Show Runtime) is required.
650 :param msg: Message to process.
655 if not msg.message.count(u"stats runtime"):
# Guard: only the first qualifying keyword per test is processed.
659 if self._sh_run_counter > 1:
662 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
663 self._data[u"tests"][self._test_id][u"show-run"] = dict()
665 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
669 host = groups.group(1)
670 except (AttributeError, IndexError):
673 sock = groups.group(2)
674 except (AttributeError, IndexError):
677 dut = u"dut{nr}".format(
678 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
680 self._data[u'tests'][self._test_id][u'show-run'][dut] = \
# Normalise the PAPI reply text so it can later be treated as JSON-ish:
# strip whitespace, unify quotes, drop b"/u" literal prefixes.
685 u"runtime": str(msg.message).replace(u' ', u'').
686 replace(u'\n', u'').replace(u"'", u'"').
687 replace(u'b"', u'"').replace(u'u"', u'"').
# Parses Prometheus-style "vpp_runtime_*" telemetry lines into per-DUT
# records under "telemetry-show-run". NOTE(review): several lines are
# elided here (e.g. the 'labels = eval(...)' around orig. 739-741 and the
# runtime dict header at 721-731); comments describe only visible code.
692 def _get_telemetry(self, msg):
693 """Called when extraction of VPP telemetry data is required.
695 :param msg: Message to process.
700 if self._telemetry_kw_counter > 1:
702 if not msg.message.count(u"# TYPE vpp_runtime_calls"):
705 if u"telemetry-show-run" not in \
706 self._data[u"tests"][self._test_id].keys():
707 self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()
709 self._telemetry_msg_counter += 1
710 groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
714 host = groups.group(1)
715 except (AttributeError, IndexError):
718 sock = groups.group(2)
719 except (AttributeError, IndexError):
722 u"source_type": u"node",
724 u"msg_type": u"metric",
725 u"log_level": u"INFO",
726 u"timestamp": msg.timestamp,
727 u"msg": u"show_runtime",
732 for line in msg.message.splitlines():
733 if not line.startswith(u"vpp_runtime_"):
# Prometheus exposition line: "<name>{<labels>} <value> <timestamp>".
736 params, value, timestamp = line.rsplit(u" ", maxsplit=2)
737 cut = params.index(u"{")
738 name = params[:cut].split(u"_", maxsplit=2)[-1]
# NOTE(review): building a "dict(...)" source string from the label text —
# the (elided) consumer presumably eval()s it; treat as trusted input only.
740 u"dict" + params[cut:].replace('{', '(').replace('}', ')')
742 labels[u"graph_node"] = labels.pop(u"name")
743 runtime[u"data"].append(
747 u"timestamp": timestamp,
751 except (TypeError, ValueError, IndexError):
753 self._data[u'tests'][self._test_id][u'telemetry-show-run']\
754 [f"dut{self._telemetry_msg_counter}"] = copy.copy(
# Result-message parsers. Each initialises defaults of -1.0 (meaning "not
# found"), fills values from the regex match, and returns (dict, status);
# the status assignments themselves (orig. lines ~772, 785, 809, 814, 838,
# 841) are elided from this excerpt.
762 def _get_ndrpdr_throughput(self, msg):
763 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
766 :param msg: The test message to be parsed.
768 :returns: Parsed data as a dict and the status (PASS/FAIL).
769 :rtype: tuple(dict, str)
773 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
774 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
777 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
779 if groups is not None:
781 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
782 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
783 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
784 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
786 except (IndexError, ValueError):
789 return throughput, status
791 def _get_ndrpdr_throughput_gbps(self, msg):
792 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
795 :param msg: The test message to be parsed.
797 :returns: Parsed data as a dict and the status (PASS/FAIL).
798 :rtype: tuple(dict, str)
802 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
803 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
806 groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
808 if groups is not None:
810 gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
811 gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
812 gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
813 gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
815 except (IndexError, ValueError):
# SOAK/PLRsearch result: single LOWER/UPPER pair.
820 def _get_plr_throughput(self, msg):
821 """Get PLRsearch lower bound and PLRsearch upper bound from the test
824 :param msg: The test message to be parsed.
826 :returns: Parsed data as a dict and the status (PASS/FAIL).
827 :rtype: tuple(dict, str)
835 groups = re.search(self.REGEX_PLR_RATE, msg)
837 if groups is not None:
839 throughput[u"LOWER"] = float(groups.group(1))
840 throughput[u"UPPER"] = float(groups.group(2))
842 except (IndexError, ValueError):
845 return throughput, status
# Latency parser: tries the 12-group extended REGEX_NDRPDR_LAT first and
# falls back to the 4-group REGEX_NDRPDR_LAT_BASE (the fallback condition
# line itself, orig. ~889, is elided). Keys NDR/PDR come from groups 1-4,
# PDR90/PDR50/PDR10/LAT0 from groups 5-12.
847 def _get_ndrpdr_latency(self, msg):
848 """Get LATENCY from the test message.
850 :param msg: The test message to be parsed.
852 :returns: Parsed data as a dict and the status (PASS/FAIL).
853 :rtype: tuple(dict, str)
# Each of the six keys gets an independent copy of latency_default (the
# default dict itself, orig. ~855-861, is elided) for both directions.
863 u"direction1": copy.copy(latency_default),
864 u"direction2": copy.copy(latency_default)
867 u"direction1": copy.copy(latency_default),
868 u"direction2": copy.copy(latency_default)
871 u"direction1": copy.copy(latency_default),
872 u"direction2": copy.copy(latency_default)
875 u"direction1": copy.copy(latency_default),
876 u"direction2": copy.copy(latency_default)
879 u"direction1": copy.copy(latency_default),
880 u"direction2": copy.copy(latency_default)
883 u"direction1": copy.copy(latency_default),
884 u"direction2": copy.copy(latency_default)
888 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
890 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
892 return latency, u"FAIL"
894 def process_latency(in_str):
895 """Return object with parsed latency values.
897 TODO: Define class for the return type.
899 :param in_str: Input string, min/avg/max/hdrh format.
901 :returns: Dict with corresponding keys, except hdrh float values.
903 :throws IndexError: If in_str does not have enough substrings.
904 :throws ValueError: If a substring does not convert to float.
906 in_list = in_str.split('/', 3)
909 u"min": float(in_list[0]),
910 u"avg": float(in_list[1]),
911 u"max": float(in_list[2]),
# Optional fourth field: the base64 hdrh histogram blob, kept verbatim.
915 if len(in_list) == 4:
916 rval[u"hdrh"] = str(in_list[3])
921 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
922 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
923 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
924 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
# Base-regex match has only 4 groups; done.
925 if groups.lastindex == 4:
926 return latency, u"PASS"
927 except (IndexError, ValueError):
931 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
932 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
933 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
934 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
935 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
936 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
937 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
938 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
939 if groups.lastindex == 12:
940 return latency, u"PASS"
941 except (IndexError, ValueError):
944 return latency, u"FAIL"
# Hoststack results arrive as Python-repr-ish text; quotes are normalised
# so json.loads (imported as `loads`) can parse them. Presumably decorated
# @staticmethod on the elided line above (no self parameter) — confirm.
947 def _get_hoststack_data(msg, tags):
948 """Get data from the hoststack test message.
950 :param msg: The test message to be parsed.
951 :param tags: Test tags.
954 :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
955 :rtype: tuple(dict, str)
960 msg = msg.replace(u"'", u'"').replace(u" ", u"")
961 if u"LDPRELOAD" in tags:
965 except JSONDecodeError:
# VPPECHO messages contain two concatenated JSON objects (client, server).
967 elif u"VPPECHO" in tags:
969 msg_lst = msg.replace(u"}{", u"} {").split(u" ")
971 client=loads(msg_lst[0]),
972 server=loads(msg_lst[1])
975 except (JSONDecodeError, IndexError):
978 return result, status
980 def _get_vsap_data(self, msg, tags):
981 """Get data from the vsap test message.
983 :param msg: The test message to be parsed.
984 :param tags: Test tags.
987 :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
988 :rtype: tuple(dict, str)
993 groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
994 if groups is not None:
996 result[u"transfer-rate"] = float(groups.group(1)) * 1e3
997 result[u"latency"] = float(groups.group(2))
998 result[u"completed-requests"] = int(groups.group(3))
999 result[u"failed-requests"] = int(groups.group(4))
1000 result[u"bytes-transferred"] = int(groups.group(5))
# Style nit (not fixable in a doc-only edit): missing space in u"TCP_CPS"in.
1001 if u"TCP_CPS"in tags:
1002 result[u"cps"] = float(groups.group(6))
1003 elif u"TCP_RPS" in tags:
1004 result[u"rps"] = float(groups.group(6))
1006 return result, status
1008 except (IndexError, ValueError):
1011 return result, status
# Robot ResultVisitor overrides: suite traversal. visit_suite recurses into
# child suites then tests; start_suite records suite metadata keyed by a
# sanitised lowercase longname.
1013 def visit_suite(self, suite):
1014 """Implements traversing through the suite and its direct children.
1016 :param suite: Suite to process.
1020 if self.start_suite(suite) is not False:
1021 suite.suites.visit(self)
1022 suite.tests.visit(self)
1023 self.end_suite(suite)
1025 def start_suite(self, suite):
1026 """Called when suite starts.
1028 :param suite: Suite to process.
# Top-level suite has no parent; AttributeError path (elided) handles it.
1034 parent_name = suite.parent.name
1035 except AttributeError:
# Key: longname lowercased, double quotes -> single, spaces -> underscores.
1038 self._data[u"suites"][suite.longname.lower().
1039 replace(u'"', u"'").
1040 replace(u" ", u"_")] = {
1041 u"name": suite.name.lower(),
1043 u"parent": parent_name,
1044 u"level": len(suite.longname.split(u"."))
1047 suite.keywords.visit(self)
1049 def end_suite(self, suite):
1050 """Called when suite ends.
1052 :param suite: Suite to process.
1057 def visit_test(self, test):
1058 """Implements traversing through the test.
1060 :param test: Test to process.
1064 if self.start_test(test) is not False:
1065 test.keywords.visit(self)
# Core per-test processing: builds the test_result record (name, tags, doc,
# status, times), formats the result message per test type, normalises
# thread/core info in names, then classifies the test (DEVICETEST, NDRPDR/
# CPS, MRR/BMRR, SOAK, HOSTSTACK, LDP_NGINX, RECONF) and extracts the
# type-specific results. Stores into self._data["tests"][self._test_id].
# NOTE(review): many control-flow lines (try/else/return) are elided here.
1068 def start_test(self, test):
1069 """Called when test starts.
1071 :param test: Test to process.
# Reset per-test keyword counters before any keyword messages arrive.
1076 self._sh_run_counter = 0
1077 self._telemetry_kw_counter = 0
1078 self._telemetry_msg_counter = 0
1080 longname_orig = test.longname.lower()
1082 # Check the ignore list
1083 if longname_orig in self._ignore:
1086 tags = [str(tag) for tag in test.tags]
1087 test_result = dict()
1089 # Change the TC long name and name if defined in the mapping table
1090 longname = self._mapping.get(longname_orig, None)
1091 if longname is not None:
1092 name = longname.split(u'.')[-1]
1094 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1098 longname = longname_orig
1099 name = test.name.lower()
1101 # Remove TC number from the TC long name (backward compatibility):
1102 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1103 # Remove TC number from the TC name (not needed):
1104 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
1106 test_result[u"parent"] = test.parent.name.lower()
1107 test_result[u"tags"] = tags
1108 test_result["doc"] = test.doc
1109 test_result[u"type"] = u""
1110 test_result[u"status"] = test.status
1111 test_result[u"starttime"] = test.starttime
1112 test_result[u"endtime"] = test.endtime
# Human-readable "msg" is formatted by the type-specific helpers above.
1114 if test.status == u"PASS":
1115 if u"NDRPDR" in tags:
1116 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1117 test_result[u"msg"] = self._get_data_from_pps_test_msg(
1119 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1120 test_result[u"msg"] = self._get_data_from_cps_test_msg(
1123 test_result[u"msg"] = self._get_data_from_perf_test_msg(
1125 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1126 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1129 test_result[u"msg"] = test.message
1131 test_result[u"msg"] = test.message
1133 if u"PERFTEST" in tags and u"TREX" not in tags:
1134 # Replace info about cores (e.g. -1c-) with the info about threads
1135 # and cores (e.g. -1t1c-) in the long test case names and in the
1136 # test case names if necessary.
1139 for tag in test_result[u"tags"]:
1140 groups = re.search(self.REGEX_TC_TAG, tag)
1146 self._test_id = re.sub(
1147 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1148 self._test_id, count=1
1150 test_result[u"name"] = re.sub(
1151 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1152 test_result["name"], count=1
# Zero or multiple NtMc tags: the test is marked FAIL and skipped early.
1155 test_result[u"status"] = u"FAIL"
1156 self._data[u"tests"][self._test_id] = test_result
1158 f"The test {self._test_id} has no or more than one "
1159 f"multi-threading tags.\n"
1160 f"Tags: {test_result[u'tags']}"
# Classification by tag; order matters (first match wins).
1164 if u"DEVICETEST" in tags:
1165 test_result[u"type"] = u"DEVICETEST"
1166 elif u"NDRPDR" in tags:
1167 if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1168 test_result[u"type"] = u"CPS"
1170 test_result[u"type"] = u"NDRPDR"
1171 if test.status == u"PASS":
1172 test_result[u"throughput"], test_result[u"status"] = \
1173 self._get_ndrpdr_throughput(test.message)
1174 test_result[u"gbps"], test_result[u"status"] = \
1175 self._get_ndrpdr_throughput_gbps(test.message)
1176 test_result[u"latency"], test_result[u"status"] = \
1177 self._get_ndrpdr_latency(test.message)
1178 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1180 test_result[u"type"] = u"MRR"
1182 test_result[u"type"] = u"BMRR"
1183 if test.status == u"PASS":
1184 test_result[u"result"] = dict()
1185 groups = re.search(self.REGEX_BMRR, test.message)
1186 if groups is not None:
1187 items_str = groups.group(1)
1189 float(item.strip().replace(u"'", u""))
1190 for item in items_str.split(",")
1192 # Use whole list in CSIT-1180.
1193 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1194 test_result[u"result"][u"samples"] = items_float
1195 test_result[u"result"][u"receive-rate"] = stats.avg
1196 test_result[u"result"][u"receive-stdev"] = stats.stdev
# Legacy MRR message format fallback: rate = rx / duration.
1198 groups = re.search(self.REGEX_MRR, test.message)
1199 test_result[u"result"][u"receive-rate"] = \
1200 float(groups.group(3)) / float(groups.group(1))
1201 elif u"SOAK" in tags:
1202 test_result[u"type"] = u"SOAK"
1203 if test.status == u"PASS":
1204 test_result[u"throughput"], test_result[u"status"] = \
1205 self._get_plr_throughput(test.message)
1206 elif u"HOSTSTACK" in tags:
1207 test_result[u"type"] = u"HOSTSTACK"
1208 if test.status == u"PASS":
1209 test_result[u"result"], test_result[u"status"] = \
1210 self._get_hoststack_data(test.message, tags)
1211 elif u"LDP_NGINX" in tags:
1212 test_result[u"type"] = u"LDP_NGINX"
1213 test_result[u"result"], test_result[u"status"] = \
1214 self._get_vsap_data(test.message, tags)
1215 # elif u"TCP" in tags: # This might be not used
1216 # test_result[u"type"] = u"TCP"
1217 # if test.status == u"PASS":
1218 # groups = re.search(self.REGEX_TCP, test.message)
1219 # test_result[u"result"] = int(groups.group(2))
1220 elif u"RECONF" in tags:
1221 test_result[u"type"] = u"RECONF"
1222 if test.status == u"PASS":
1223 test_result[u"result"] = None
1225 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1226 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1227 test_result[u"result"] = {
1228 u"loss": int(grps_loss.group(1)),
1229 u"time": float(grps_time.group(1))
1231 except (AttributeError, IndexError, ValueError, TypeError):
1232 test_result[u"status"] = u"FAIL"
# Unrecognised test type: recorded as FAIL (else-branch line elided).
1234 test_result[u"status"] = u"FAIL"
1236 self._data[u"tests"][self._test_id] = test_result
# Keyword-level visitor plumbing. start_keyword routes setup/teardown/test
# keywords to the dedicated visit_*_kw traversals; start_test_kw sets the
# message type that the handler dispatch table in __init__ consumes.
1238 def end_test(self, test):
1239 """Called when test ends.
1241 :param test: Test to process.
1246 def visit_keyword(self, keyword):
1247 """Implements traversing through the keyword and its child keywords.
1249 :param keyword: Keyword to process.
1250 :type keyword: Keyword
1253 if self.start_keyword(keyword) is not False:
1254 self.end_keyword(keyword)
1256 def start_keyword(self, keyword):
1257 """Called when keyword starts. Default implementation does nothing.
1259 :param keyword: Keyword to process.
1260 :type keyword: Keyword
1264 if keyword.type == u"setup":
1265 self.visit_setup_kw(keyword)
1266 elif keyword.type == u"teardown":
1267 self.visit_teardown_kw(keyword)
1269 self.visit_test_kw(keyword)
# Keywords without a .type attribute are ignored.
1270 except AttributeError:
1273 def end_keyword(self, keyword):
1274 """Called when keyword ends. Default implementation does nothing.
1276 :param keyword: Keyword to process.
1277 :type keyword: Keyword
1281 def visit_test_kw(self, test_kw):
1282 """Implements traversing through the test keyword and its child
1285 :param test_kw: Keyword to process.
1286 :type test_kw: Keyword
1289 for keyword in test_kw.keywords:
1290 if self.start_test_kw(keyword) is not False:
1291 self.visit_test_kw(keyword)
1292 self.end_test_kw(keyword)
1294 def start_test_kw(self, test_kw):
1295 """Called when test keyword starts. Default implementation does
1298 :param test_kw: Keyword to process.
1299 :type test_kw: Keyword
# For "trending" output only telemetry keywords matter (branch body elided).
1302 if self._for_output == u"trending":
1305 if test_kw.name.count(u"Run Telemetry On All Duts"):
1306 self._msg_type = u"test-telemetry"
1307 self._telemetry_kw_counter += 1
1308 elif test_kw.name.count(u"Show Runtime On All Duts"):
1309 self._msg_type = u"test-show-runtime"
1310 self._sh_run_counter += 1
1313 test_kw.messages.visit(self)
1315 def end_test_kw(self, test_kw):
1316 """Called when keyword ends. Default implementation does nothing.
1318 :param test_kw: Keyword to process.
1319 :type test_kw: Keyword
1323 def visit_setup_kw(self, setup_kw):
1324 """Implements traversing through the teardown keyword and its child
1327 :param setup_kw: Keyword to process.
1328 :type setup_kw: Keyword
1331 for keyword in setup_kw.keywords:
1332 if self.start_setup_kw(keyword) is not False:
1333 self.visit_setup_kw(keyword)
1334 self.end_setup_kw(keyword)
1336 def start_setup_kw(self, setup_kw):
1337 """Called when teardown keyword starts. Default implementation does
1340 :param setup_kw: Keyword to process.
1341 :type setup_kw: Keyword
1344 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1345 and not self._version:
1346 self._msg_type = u"vpp-version"
1347 elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1349 self._msg_type = u"dpdk-version"
1350 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1351 self._msg_type = u"testbed"
1354 setup_kw.messages.visit(self)
1356 def end_setup_kw(self, setup_kw):
1357 """Called when keyword ends. Default implementation does nothing.
1359 :param setup_kw: Keyword to process.
1360 :type setup_kw: Keyword
1364 def visit_teardown_kw(self, teardown_kw):
1365 """Implements traversing through the teardown keyword and its child
1368 :param teardown_kw: Keyword to process.
1369 :type teardown_kw: Keyword
# Visitor protocol: start_teardown_kw() may veto traversal of a child by
# returning False; mirrors visit_setup_kw().  NOTE(review): elided
# listing, flattened indentation -- exact nesting not confirmable here.
1372 for keyword in teardown_kw.keywords:
1373 if self.start_teardown_kw(keyword) is not False:
1374 self.visit_teardown_kw(keyword)
1375 self.end_teardown_kw(keyword)
1377 def start_teardown_kw(self, teardown_kw):
1378 """Called when teardown keyword starts
1380 :param teardown_kw: Keyword to process.
1381 :type teardown_kw: Keyword
# Reset the PAPI-history lookup counter and switch the message parser to
# the teardown-papi-history state before visiting the messages.
1384 if teardown_kw.name.count(u"Show Papi History On All Duts"):
1385 self._conf_history_lookup_nr = 0
1386 self._msg_type = u"teardown-papi-history"
# NOTE(review): flattened listing -- whether this visit() call is nested
# inside the if-branch above cannot be told from the indentation here.
1387 teardown_kw.messages.visit(self)
1389 def end_teardown_kw(self, teardown_kw):
# No-op visitor hook, kept to satisfy the ResultVisitor protocol.
# NOTE(review): elided listing -- docstring closing lines are missing.
1390 """Called when keyword ends. Default implementation does nothing.
1392 :param teardown_kw: Keyword to process.
1393 :type teardown_kw: Keyword
def visit_message(self, msg):
    """Implements visiting the message.

    Runs the start_message() hook first; unless that hook explicitly
    returned False, end_message() is invoked as well.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    # Visitor protocol: only an explicit False vetoes the end hook --
    # any other return value (including None) lets it run.
    if self.start_message(msg) is False:
        return
    self.end_message(msg)
1407 def start_message(self, msg):
1408 """Called when message starts. Get required information from messages:
1411 :param msg: Message to process.
# Dispatch the message to the parser registered for the current state
# (self._msg_type) in the self.parse_msg table.
# NOTE(review): original lines 1412-1415 are elided; they may contain a
# guard around this dispatch -- confirm against the full file.
1416 self.parse_msg[self._msg_type](msg)
1418 def end_message(self, msg):
# No-op visitor hook, kept to satisfy the ResultVisitor protocol.
# NOTE(review): elided listing -- docstring closing lines are missing.
1419 """Called when message ends. Default implementation does nothing.
1421 :param msg: Message to process.
1430 The data is extracted from output.xml files generated by Jenkins jobs and
1431 stored in pandas' DataFrames.
1437 (as described in ExecutionChecker documentation)
1439 (as described in ExecutionChecker documentation)
1441 (as described in ExecutionChecker documentation)
1444 def __init__(self, spec, for_output):
# Store what output the downloaded data will be used for; this value is
# later passed to ExecutionChecker in _parse_tests().
1447 :param spec: Specification.
1448 :param for_output: Output to be generated from downloaded data.
1449 :type spec: Specification
1450 :type for_output: str
# NOTE(review): elided listing -- original lines 1445-1446 and 1451-1458
# are missing; they presumably hold the docstring delimiters and the
# assignment of the configuration (e.g. self._cfg = spec) that other
# methods of this class read.  Confirm against the full file.
1456 self._for_output = for_output
# Empty Series used as a dict-of-Series container keyed by job name.
# NOTE: no explicit dtype -- newer pandas warns on this construction.
1459 self._input_data = pd.Series()
1463 """Getter - Input data.
1465 :returns: Input data
1466 :rtype: pandas.Series
# Read-only accessor for the parsed input data; presumably declared as a
# @property on the elided preceding lines (orig. 1461-1462) -- confirm
# against the full file.
1468 return self._input_data
def metadata(self, job, build):
    """Getter - metadata.

    :param job: Job which metadata we want.
    :param build: Build which metadata we want.
    :type job: str
    :type build: str
    :returns: Metadata of the given job/build.
    :rtype: pandas.Series
    """
    # Builds are stored under *string* keys (download_and_parse_data and
    # process_local_file both index with str(build_nr)), so normalize the
    # build identifier here the same way suites() already does.  This is
    # backward-compatible and additionally makes the getter work when an
    # integer build number is passed in.
    return self.data[job][str(build)][u"metadata"]
def suites(self, job, build):
    """Getter - suites.

    :param job: Job which suites we want.
    :param build: Build which suites we want.
    :type job: str
    :type build: str
    :returns: Suites of the given job/build.
    :rtype: pandas.Series
    """
    # Build numbers are stored as string keys, hence the str() conversion.
    build_data = self.data[job][str(build)]
    return build_data[u"suites"]
def tests(self, job, build):
    """Getter - tests.

    :param job: Job which tests we want.
    :param build: Build which tests we want.
    :type job: str
    :type build: str
    :returns: Tests of the given job/build.
    :rtype: pandas.Series
    """
    # Builds are stored under *string* keys (download_and_parse_data and
    # process_local_file both index with str(build_nr)); convert here the
    # same way suites() does, so integer build numbers also work.  This
    # is backward-compatible for callers already passing strings.
    return self.data[job][str(build)][u"tests"]
1506 def _parse_tests(self, job, build):
1507 """Process data from robot output.xml file and return JSON structured
1510 :param job: The name of job which build output data will be processed.
1511 :param build: The build which output data will be processed.
1514 :returns: JSON data structure.
# Parse the Robot Framework output.xml; a DataError means the file is
# damaged or truncated.  NOTE(review): elided listing -- the opening
# "try:" (orig. ~1524) and the error-logging/early-return lines are
# missing from this view.
1523 with open(build[u"file-name"], u'r') as data_file:
1525 result = ExecutionResult(data_file)
1526 except errors.DataError as err:
1528 f"Error occurred while parsing output.xml: {repr(err)}"
# Walk the result tree with ExecutionChecker, which builds the JSON-like
# structure documented on that class; "metadata" is presumably assembled
# on the elided lines above -- confirm against the full file.
1531 checker = ExecutionChecker(
1532 metadata, self._cfg.mapping, self._cfg.ignore, self._for_output
1534 result.visit(checker)
# Enrich the metadata with overall test statistics of the parsed run.
1536 checker.data[u"metadata"][u"tests_total"] = \
1537 result.statistics.total.all.total
1538 checker.data[u"metadata"][u"tests_passed"] = \
1539 result.statistics.total.all.passed
1540 checker.data[u"metadata"][u"tests_failed"] = \
1541 result.statistics.total.all.failed
1542 checker.data[u"metadata"][u"elapsedtime"] = result.suite.elapsedtime
# endtime[:14] keeps "YYYYMMDD HH:MM"; must stay in sync with the
# strptime format used in _download_and_parse_build.
1543 checker.data[u"metadata"][u"generated"] = result.suite.endtime[:14]
1547 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1548 """Download and parse the input data file.
1550 :param pid: PID of the process executing this method.
1551 :param job: Name of the Jenkins job which generated the processed input
1553 :param build: Information about the Jenkins build which generated the
1554 processed input file.
1555 :param repeat: Repeat the download specified number of times if not
# NOTE(review): elided listing -- the download-retry loop and the
# "state"/"data" initialisation lines are missing from this view.
1563 logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1570 success = download_and_unzip_data_file(self._cfg, job, build, pid)
1576 f"It is not possible to download the input data file from the "
1577 f"job {job}, build {build[u'build']}, or it is damaged. "
1581 logging.info(f" Processing data from build {build[u'build']}")
1582 data = self._parse_tests(job, build)
1585 f"Input data file from the job {job}, build "
1586 f"{build[u'build']} is damaged. Skipped."
1589 state = u"processed"
# Best-effort cleanup of the downloaded file; a failed remove is only
# logged, never fatal.
1592 remove(build[u"file-name"])
1593 except OSError as err:
1595 f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1598 # If the time-period is defined in the specification file, remove all
1599 # files which are outside the time period.
1601 timeperiod = self._cfg.environment.get(u"time-period", None)
1602 if timeperiod and data:
# timedelta(int(...)) interprets the configured time-period as DAYS.
1604 timeperiod = timedelta(int(timeperiod))
1605 metadata = data.get(u"metadata", None)
1607 generated = metadata.get(u"generated", None)
# Format matches the endtime[:14] slice produced in _parse_tests.
1609 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1610 if (now - generated) > timeperiod:
1611 # Remove the data and the file:
1616 f" The build {job}/{build[u'build']} is "
1617 f"outdated, will be removed."
1627 def download_and_parse_data(self, repeat=1):
1628 """Download the input data files, parse input data from input files and
1629 store in pandas' Series.
1631 :param repeat: Repeat the download specified number of times if not
1636 logging.info(u"Downloading and parsing input files ...")
1638 for job, builds in self._cfg.input.items():
1639 for build in builds:
1641 result = self._download_and_parse_build(job, build, repeat)
1644 build_nr = result[u"build"][u"build"]
# Re-pack the parsed dicts into Series keyed by metadata/suite/test IDs.
# NOTE(review): elided listing -- the surrounding guard lines (e.g.
# skipping failed builds) are missing from this view.
1647 data = result[u"data"]
1648 build_data = pd.Series({
1649 u"metadata": pd.Series(
1650 list(data[u"metadata"].values()),
1651 index=list(data[u"metadata"].keys())
1653 u"suites": pd.Series(
1654 list(data[u"suites"].values()),
1655 index=list(data[u"suites"].keys())
1657 u"tests": pd.Series(
1658 list(data[u"tests"].values()),
1659 index=list(data[u"tests"].keys())
# Builds are stored under STRING keys -- getters must index with
# str(build) accordingly.
1663 if self._input_data.get(job, None) is None:
1664 self._input_data[job] = pd.Series()
1665 self._input_data[job][str(build_nr)] = build_data
1666 self._cfg.set_input_file_name(
1667 job, build_nr, result[u"build"][u"file-name"]
1669 self._cfg.set_input_state(job, build_nr, result[u"state"])
# ru_maxrss is in kilobytes on Linux, so /1000 yields approx. megabytes.
1672 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1673 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1675 logging.info(u"Done.")
# NOTE: f-prefix on the next literal is redundant (no placeholders).
1677 msg = f"Successful downloads from the sources:\n"
1678 for source in self._cfg.environment[u"data-sources"]:
1679 if source[u"successful-downloads"]:
1681 f"{source[u'url']}/{source[u'path']}/"
1682 f"{source[u'file-name']}: "
1683 f"{source[u'successful-downloads']}\n"
1687 def process_local_file(self, local_file, job=u"local", build_nr=1,
1689 """Process local XML file given as a command-line parameter.
1691 :param local_file: The file to process.
1692 :param job: Job name.
1693 :param build_nr: Build number.
1694 :param replace: If True, the information about jobs and builds is
1695 replaced by the new one, otherwise the new jobs and builds are
1697 :type local_file: str
1701 :raises: PresentationError if an error occurs.
1703 if not isfile(local_file):
1704 raise PresentationError(f"The file {local_file} does not exist.")
# Try to derive the build number from the file-name stem; fall back to
# the given build_nr on failure.  NOTE: splitting on u"/" assumes POSIX
# paths -- pathlib would be portable, but code is left unchanged here.
1707 build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1708 except (IndexError, ValueError):
# Build record starts as "failed"; flipped to "processed" at the end.
1713 u"status": u"failed",
1714 u"file-name": local_file
1717 self._cfg.input = dict()
1718 self._cfg.add_build(job, build)
1720 logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1721 data = self._parse_tests(job, build)
1723 raise PresentationError(
1724 f"Error occurred while parsing the file {local_file}"
# Same Series re-packing as in download_and_parse_data.
1727 build_data = pd.Series({
1728 u"metadata": pd.Series(
1729 list(data[u"metadata"].values()),
1730 index=list(data[u"metadata"].keys())
1732 u"suites": pd.Series(
1733 list(data[u"suites"].values()),
1734 index=list(data[u"suites"].keys())
1736 u"tests": pd.Series(
1737 list(data[u"tests"].values()),
1738 index=list(data[u"tests"].keys())
# Builds stored under STRING keys, consistent with the download path.
1742 if self._input_data.get(job, None) is None:
1743 self._input_data[job] = pd.Series()
1744 self._input_data[job][str(build_nr)] = build_data
1746 self._cfg.set_input_state(job, build_nr, u"processed")
1748 def process_local_directory(self, local_dir, replace=True):
1749 """Process local directory with XML file(s). The directory is processed
1750 as a 'job' and the XML files in it as builds.
1751 If the given directory contains only sub-directories, these
1752 sub-directories processed as jobs and corresponding XML files as builds
1755 :param local_dir: Local directory to process.
1756 :param replace: If True, the information about jobs and builds is
1757 replaced by the new one, otherwise the new jobs and builds are
1759 :type local_dir: str
1762 if not isdir(local_dir):
1763 raise PresentationError(
1764 f"The directory {local_dir} does not exist."
1767 # Check if the given directory includes only files, or only directories
# next(walk(...)) reads only the TOP level of local_dir -- no recursion.
1768 _, dirnames, filenames = next(walk(local_dir))
1770 if filenames and not dirnames:
1773 # key: dir (job) name, value: list of file names (builds)
1775 local_dir: [join(local_dir, name) for name in filenames]
1778 elif dirnames and not filenames:
1781 # key: dir (job) name, value: list of file names (builds)
1782 local_builds = dict()
1783 for dirname in dirnames:
1785 join(local_dir, dirname, name)
1786 for name in listdir(join(local_dir, dirname))
1787 if isfile(join(local_dir, dirname, name))
# Sorting gives deterministic build numbering (idx + 1 below).
1790 local_builds[dirname] = sorted(builds)
1792 elif not filenames and not dirnames:
1793 raise PresentationError(f"The directory {local_dir} is empty.")
1795 raise PresentationError(
1796 f"The directory {local_dir} can include only files or only "
1797 f"directories, not both.\nThe directory {local_dir} includes "
1798 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1802 self._cfg.input = dict()
# replace=False so each file ADDS a build instead of resetting the input.
1804 for job, files in local_builds.items():
1805 for idx, local_file in enumerate(files):
1806 self.process_local_file(local_file, job, idx + 1, replace=False)
1809 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1810 """Return the index of character in the string which is the end of tag.
1812 :param tag_filter: The string where the end of tag is being searched.
1813 :param start: The index where the searching is started.
1814 :param closer: The character which is the tag closer.
1815 :type tag_filter: str
1818 :returns: The index of the tag closer.
# Find the opening quote from 'start', then return the index of the
# matching closing quote.  NOTE(review): elided listing -- presumably a
# @staticmethod decorator and the try/except around these calls
# (str.index raises ValueError when no closer exists) are missing here;
# confirm against the full file.
1822 idx_opener = tag_filter.index(closer, start)
1823 return tag_filter.index(closer, idx_opener + 1)
1828 def _condition(tag_filter):
1829 """Create a conditional statement from the given tag filter.
1831 :param tag_filter: Filter based on tags from the element specification.
1832 :type tag_filter: str
1833 :returns: Conditional statement which can be evaluated.
# Rewrite each quoted tag in the filter into a "'TAG' in tags" membership
# test; the resulting expression string is eval()-ed in filter_data().
# NOTE(review): elided listing -- the loop/termination lines around these
# two statements are missing from this view.
1838 index = InputData._end_of_tag(tag_filter, index)
1842 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1844 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1845 continue_on_error=False):
1846 """Filter required data from the given jobs and builds.
1848 The output data structure is:
1851 - test (or suite) 1 ID:
1857 - test (or suite) n ID:
1864 :param element: Element which will use the filtered data.
1865 :param params: Parameters which will be included in the output. If None,
1866 all parameters are included.
1867 :param data: If not None, this data is used instead of data specified
1869 :param data_set: The set of data to be filtered: tests, suites,
1871 :param continue_on_error: Continue if there is error while reading the
1872 data. The Item will be empty then
1873 :type element: pandas.Series
1877 :type continue_on_error: bool
1878 :returns: Filtered data.
1879 :rtype pandas.Series
# Build the filter condition; "all"/"template" filters bypass the tag
# expression.  NOTE(review): elided listing -- the branches' bodies are
# partially missing from this view.
1883 if data_set == "suites":
1885 elif element[u"filter"] in (u"all", u"template"):
1888 cond = InputData._condition(element[u"filter"])
1889 logging.debug(f" Filter: {cond}")
1891 logging.error(u" No filter defined.")
1895 params = element.get(u"parameters", None)
1897 params.extend((u"type", u"status"))
1899 data_to_filter = data if data else element[u"data"]
# NOTE: the local name "data" is re-bound below as the OUTPUT container,
# shadowing the "data" parameter -- data_to_filter was captured first.
1902 for job, builds in data_to_filter.items():
1903 data[job] = pd.Series()
1904 for build in builds:
1905 data[job][str(build)] = pd.Series()
1908 self.data[job][str(build)][data_set].items())
1910 if continue_on_error:
# NOTE: eval() of the generated tag condition.  The filter string comes
# from the (trusted) element specification -- never feed untrusted input
# into this path.
1914 for test_id, test_data in data_dict.items():
1915 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1916 data[job][str(build)][test_id] = pd.Series()
1918 for param, val in test_data.items():
1919 data[job][str(build)][test_id][param] = val
1921 for param in params:
1923 data[job][str(build)][test_id][param] =\
1926 data[job][str(build)][test_id][param] =\
1930 except (KeyError, IndexError, ValueError) as err:
1932 f"Missing mandatory parameter in the element specification: "
1936 except AttributeError as err:
1937 logging.error(repr(err))
1939 except SyntaxError as err:
1941 f"The filter {cond} is not correct. Check if all tags are "
1942 f"enclosed by apostrophes.\n{repr(err)}"
1946 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1947 continue_on_error=False):
1948 """Filter required data from the given jobs and builds.
1950 The output data structure is:
1953 - test (or suite) 1 ID:
1959 - test (or suite) n ID:
1966 :param element: Element which will use the filtered data.
1967 :param params: Parameters which will be included in the output. If None,
1968 all parameters are included.
1969 :param data_set: The set of data to be filtered: tests, suites,
1971 :param continue_on_error: Continue if there is error while reading the
1972 data. The Item will be empty then
1973 :type element: pandas.Series
1976 :type continue_on_error: bool
1977 :returns: Filtered data.
1978 :rtype pandas.Series
1981 include = element.get(u"include", None)
1983 logging.warning(u"No tests to include, skipping the element.")
1987 params = element.get(u"parameters", None)
1988 if params and u"type" not in params:
1989 params.append(u"type")
# Expand the "include" patterns with each configured core variant via
# str.format(core=...).
1991 cores = element.get(u"core", None)
1995 for test in include:
1996 tests.append(test.format(core=core))
2002 for job, builds in element[u"data"].items():
2003 data[job] = pd.Series()
2004 for build in builds:
2005 data[job][str(build)] = pd.Series()
# Each include entry is treated as a regex; re.match anchors at the
# START of the (lower-cased) test ID only -- not a full-string match.
2008 reg_ex = re.compile(str(test).lower())
2009 for test_id in self.data[job][
2010 str(build)][data_set].keys():
2011 if re.match(reg_ex, str(test_id).lower()):
2012 test_data = self.data[job][
2013 str(build)][data_set][test_id]
2014 data[job][str(build)][test_id] = pd.Series()
2016 for param, val in test_data.items():
2017 data[job][str(build)][test_id]\
2020 for param in params:
2022 data[job][str(build)][
# Missing parameters are recorded as the literal "No Data".
2026 data[job][str(build)][
2027 test_id][param] = u"No Data"
2028 except KeyError as err:
2029 if continue_on_error:
2030 logging.debug(repr(err))
2032 logging.error(repr(err))
2036 except (KeyError, IndexError, ValueError) as err:
2038 f"Missing mandatory parameter in the element "
2039 f"specification: {repr(err)}"
2042 except AttributeError as err:
2043 logging.error(repr(err))
2047 def merge_data(data):
2048 """Merge data from more jobs and builds to a simple data structure.
2050 The output data structure is:
2052 - test (suite) 1 ID:
2058 - test (suite) n ID:
2061 :param data: Data to merge.
2062 :type data: pandas.Series
2063 :returns: Merged data.
2064 :rtype: pandas.Series
2067 logging.info(u" Merging data ...")
# Flatten job -> build -> item into a single Series keyed by item ID.
# Later builds silently OVERWRITE earlier entries with the same ID.
# NOTE(review): elided listing -- the @staticmethod decorator and the
# final "return merged_data" line are missing from this view.
2069 merged_data = pd.Series()
2070 for builds in data.values:
2071 for item in builds.values:
2072 for item_id, item_data in item.items():
2073 merged_data[item_id] = item_data
2076 def print_all_oper_data(self):
2077 """Print all operational data to console.
# Walk job -> build -> test and print the VPP "show runtime" data stored
# under "show-run" as per-thread tables.  NOTE(review): elided listing --
# several continuation/guard lines are missing from this view.
2080 for job in self._input_data.values:
2081 for build in job.values:
2082 for test_id, test_data in build[u"tests"].items():
2084 if test_data.get(u"show-run", None) is None:
2086 for dut_name, data in test_data[u"show-run"].items():
2087 if data.get(u"runtime", None) is None:
# "runtime" is a JSON string captured from the DUT; decoded here.
2089 runtime = loads(data[u"runtime"])
2091 threads_nr = len(runtime[0][u"clocks"])
2092 except (IndexError, KeyError):
2094 threads = OrderedDict(
2095 {idx: list() for idx in range(threads_nr)})
2096 for item in runtime:
2097 for idx in range(threads_nr):
# Clocks are normalised per vector, else per call, else per suspend.
2098 if item[u"vectors"][idx] > 0:
2099 clocks = item[u"clocks"][idx] / \
2100 item[u"vectors"][idx]
2101 elif item[u"calls"][idx] > 0:
2102 clocks = item[u"clocks"][idx] / \
2104 elif item[u"suspends"][idx] > 0:
2105 clocks = item[u"clocks"][idx] / \
2106 item[u"suspends"][idx]
2110 if item[u"calls"][idx] > 0:
2111 vectors_call = item[u"vectors"][idx] / \
# Nodes with zero calls+vectors+suspends are skipped entirely.
2116 if int(item[u"calls"][idx]) + int(
2117 item[u"vectors"][idx]) + \
2118 int(item[u"suspends"][idx]):
2119 threads[idx].append([
2121 item[u"calls"][idx],
2122 item[u"vectors"][idx],
2123 item[u"suspends"][idx],
2128 print(f"Host IP: {data.get(u'host', '')}, "
2129 f"Socket: {data.get(u'socket', '')}")
2130 for thread_nr, thread in threads.items():
2131 txt_table = prettytable.PrettyTable(
2137 u"Cycles per Packet",
2138 u"Average Vector Size"
2143 txt_table.add_row(row)
2145 if len(thread) == 0:
2148 avg = f", Average Vector Size per Node: " \
2149 f"{(avg / len(thread)):.2f}"
# Thread 0 is VPP's main thread; others are workers.
2150 th_name = u"main" if thread_nr == 0 \
2151 else f"worker_{thread_nr}"
2152 print(f"{dut_name}, {th_name}{avg}")
2153 txt_table.float_format = u".2"
2154 txt_table.align = u"r"
2155 txt_table.align[u"Name"] = u"l"
2156 print(f"{txt_table.get_string()}\n")