1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
48 # Separator used in file names
# NOTE(review): this span is the class statement plus a fragment of its class
# docstring; many docstring lines are missing from this excerpt (internal
# numbering jumps, e.g. 54, 56-60). The docstring sketches the JSON structure
# produced while visiting a Robot Framework output.xml result tree.
52 class ExecutionChecker(ResultVisitor):
53 """Class to traverse through the test suite structure.
55 The functionality implemented in this class generates a json structure:
61 "generated": "Timestamp",
62 "version": "SUT version",
63 "job": "Jenkins job name",
64 "build": "Information about the build"
67 "Suite long name 1": {
69 "doc": "Suite 1 documentation",
70 "parent": "Suite 1 parent",
71 "level": "Level of the suite in the suite hierarchy"
73 "Suite long name N": {
75 "doc": "Suite N documentation",
76 "parent": "Suite 2 parent",
77 "level": "Level of the suite in the suite hierarchy"
84 "parent": "Name of the parent of the test",
85 "doc": "Test documentation",
86 "msg": "Test message",
87 "conf-history": "DUT1 and DUT2 VAT History",
88 "show-run": "Show Run",
89 "tags": ["tag 1", "tag 2", "tag n"],
91 "status": "PASS" | "FAIL",
137 "parent": "Name of the parent of the test",
138 "doc": "Test documentation",
139 "msg": "Test message",
140 "tags": ["tag 1", "tag 2", "tag n"],
142 "status": "PASS" | "FAIL",
149 "parent": "Name of the parent of the test",
150 "doc": "Test documentation",
151 "msg": "Test message",
152 "tags": ["tag 1", "tag 2", "tag n"],
153 "type": "MRR" | "BMRR",
154 "status": "PASS" | "FAIL",
156 "receive-rate": float,
157 # Average of a list, computed using AvgStdevStats.
158 # In CSIT-1180, replace with List[float].
172 "metadata": { # Optional
173 "version": "VPP version",
174 "job": "Jenkins job name",
175 "build": "Information about the build"
179 "doc": "Suite 1 documentation",
180 "parent": "Suite 1 parent",
181 "level": "Level of the suite in the suite hierarchy"
184 "doc": "Suite N documentation",
185 "parent": "Suite 2 parent",
186 "level": "Level of the suite in the suite hierarchy"
192 "parent": "Name of the parent of the test",
193 "doc": "Test documentation"
194 "msg": "Test message"
195 "tags": ["tag 1", "tag 2", "tag n"],
196 "conf-history": "DUT1 and DUT2 VAT History"
197 "show-run": "Show Run"
198 "status": "PASS" | "FAIL"
206 .. note:: ID is the lowercase full path to the test.
# Class-level compiled regexes used to parse result messages. Closing
# parentheses of several re.compile(...) calls are elided in this excerpt.
# NOTE(review): patterns like (\d+.\d+) use an unescaped dot, which matches
# any character, not only a decimal point — presumably harmless on the
# expected inputs, but worth confirming/escaping upstream.
209 REGEX_PLR_RATE = re.compile(
210 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
211 r'PLRsearch upper bound::?\s(\d+.\d+)'
213 REGEX_NDRPDR_RATE = re.compile(
214 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
215 r'NDR_UPPER:\s(\d+.\d+).*\n'
216 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
217 r'PDR_UPPER:\s(\d+.\d+)'
219 REGEX_NDRPDR_GBPS = re.compile(
220 r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
221 r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
222 r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
223 r'PDR_UPPER:.*,\s(\d+.\d+)'
225 REGEX_PERF_MSG_INFO = re.compile(
226 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
227 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
228 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
229 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
230 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
232 REGEX_CPS_MSG_INFO = re.compile(
233 r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
234 r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
236 REGEX_PPS_MSG_INFO = re.compile(
237 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
238 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
# Captures the bracketed list of per-trial results from an MRR message.
240 REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
242 REGEX_VSAP_MSG_INFO = re.compile(
243 r'Transfer Rate: (\d*.\d*).*\n'
244 r'Latency: (\d*.\d*).*\n'
245 r'Completed requests: (\d*).*\n'
246 r'Failed requests: (\d*).*\n'
247 r'Total data transferred: (\d*).*\n'
248 r'Connection [cr]ps rate:\s*(\d*.\d*)'
251 # Needed for CPS and PPS tests
252 REGEX_NDRPDR_LAT_BASE = re.compile(
253 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
254 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
256 REGEX_NDRPDR_LAT = re.compile(
257 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
258 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
259 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
260 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
261 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
262 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
265 REGEX_VERSION_VPP = re.compile(
266 r"(return STDOUT Version:\s*|"
267 r"VPP Version:\s*|VPP version:\s*)(.*)"
269 REGEX_VERSION_DPDK = re.compile(
270 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
272 REGEX_TCP = re.compile(
273 r'Total\s(rps|cps|throughput):\s(\d*).*$'
275 REGEX_MRR = re.compile(
276 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
277 r'tx\s(\d*),\srx\s(\d*)'
279 REGEX_BMRR = re.compile(
280 r'.*trial results.*: \[(.*)\]'
282 REGEX_RECONF_LOSS = re.compile(
283 r'Packets lost due to reconfig: (\d*)'
285 REGEX_RECONF_TIME = re.compile(
286 r'Implied time lost: (\d*.[\de-]*)'
# Test-case naming helpers: thread/core tag (e.g. 2t1c), old/new name forms,
# and the leading tcNN- number stripped from names.
288 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
290 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
292 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
294 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
296 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
298 REGEX_SH_RUN_HOST = re.compile(
299 r'hostname=\"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\",hook=\"(.*)\"'
# Constructor: stores parsing configuration and initialises the result
# structure. NOTE(review): several lines are elided here (e.g. the
# "self._data = {" opener around 352 and the "self.parse_msg = {" opener
# around 364, plus self._version/self._testbed/self._test_id initialisers).
302 def __init__(self, metadata, mapping, ignore, for_output):
305 :param metadata: Key-value pairs to be included in "metadata" part of
307 :param mapping: Mapping of the old names of test cases to the new
309 :param ignore: List of TCs to be ignored.
310 :param for_output: Output to be generated from downloaded data.
314 :type for_output: str
317 # Type of message to parse out from the test messages
318 self._msg_type = None
324 self._timestamp = None
326 # Testbed. The testbed is identified by TG node IP address.
329 # Mapping of TCs long names
330 self._mapping = mapping
333 self._ignore = ignore
335 self._for_output = for_output
337 # Number of PAPI History messages found:
339 # 1 - PAPI History of DUT1
340 # 2 - PAPI History of DUT2
341 self._conf_history_lookup_nr = 0
# Per-test counters reset in start_test(); they guard against parsing
# duplicate show-run / telemetry keywords.
343 self._sh_run_counter = 0
344 self._telemetry_kw_counter = 0
345 self._telemetry_msg_counter = 0
347 # Test ID of currently processed test- the lowercase full path to the
351 # The main data structure
353 u"metadata": OrderedDict(),
354 u"suites": OrderedDict(),
355 u"tests": OrderedDict()
358 # Save the provided metadata
359 for key, val in metadata.items():
360 self._data[u"metadata"][key] = val
362 # Dictionary defining the methods used to parse different types of
# Dispatch table consumed by start_message(): msg-type string -> parser.
365 u"timestamp": self._get_timestamp,
366 u"vpp-version": self._get_vpp_version,
367 u"dpdk-version": self._get_dpdk_version,
368 u"teardown-papi-history": self._get_papi_history,
369 u"test-show-runtime": self._get_show_run,
370 u"testbed": self._get_testbed,
371 u"test-telemetry": self._get_telemetry
# NOTE(review): the fragment below belongs to the "data" property whose
# "def data(self):" line (375) is elided from this excerpt.
376 """Getter - Data parsed from the XML file.
378 :returns: Data parsed from the XML file.
# Formats an MRR test message into "[x.xx, y.yy, ...]" with per-trial rates
# scaled to millions. NOTE(review): the try: lines and the out_str
# initialisation / for-loop header (lines 395-403) are elided here.
383 def _get_data_from_mrr_test_msg(self, msg):
384 """Get info from message of MRR performance tests.
386 :param msg: Message to be processed.
388 :returns: Processed message or original message if a problem occurs.
392 groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
393 if not groups or groups.lastindex != 1:
394 return u"Test Failed."
397 data = groups.group(1).split(u", ")
398 except (AttributeError, IndexError, ValueError, KeyError):
399 return u"Test Failed."
404 out_str += f"{(float(item) / 1e6):.2f}, "
# Drop the trailing ", " before closing the bracket.
405 return out_str[:-2] + u"]"
406 except (AttributeError, IndexError, ValueError, KeyError):
407 return u"Test Failed."
# Formats NDR/PDR lower bounds of a CPS test as two lines, scaled to
# millions of connections per second. NOTE(review): the try: / return (
# lines (421-423) are elided from this excerpt.
409 def _get_data_from_cps_test_msg(self, msg):
410 """Get info from message of NDRPDR CPS tests.
412 :param msg: Message to be processed.
414 :returns: Processed message or "Test Failed." if a problem occurs.
418 groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
419 if not groups or groups.lastindex != 2:
420 return u"Test Failed."
424 f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
425 f"2. {(float(groups.group(2)) / 1e6):5.2f}"
427 except (AttributeError, IndexError, ValueError, KeyError):
428 return u"Test Failed."
# Formats NDR/PDR lower bounds of a PPS test: packet rate (scaled to Mpps)
# followed by the bandwidth figure. NOTE(review): the try: / return (
# lines (442-444) are elided from this excerpt.
430 def _get_data_from_pps_test_msg(self, msg):
431 """Get info from message of NDRPDR PPS tests.
433 :param msg: Message to be processed.
435 :returns: Processed message or "Test Failed." if a problem occurs.
439 groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
440 if not groups or groups.lastindex != 4:
441 return u"Test Failed."
445 f"1. {(float(groups.group(1)) / 1e6):5.2f} "
446 f"{float(groups.group(2)):5.2f}\n"
447 f"2. {(float(groups.group(3)) / 1e6):5.2f} "
448 f"{float(groups.group(4)):5.2f}"
450 except (AttributeError, IndexError, ValueError, KeyError):
451 return u"Test Failed."
# Builds a human-readable summary (rates + latency percentile table) from an
# NDRPDR performance message. Heavily elided in this excerpt.
453 def _get_data_from_perf_test_msg(self, msg):
454 """Get info from message of NDRPDR performance tests.
456 :param msg: Message to be processed.
458 :returns: Processed message or "Test Failed." if a problem occurs.
462 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
463 if not groups or groups.lastindex != 10:
464 return u"Test Failed."
# Groups 1-4: NDR/PDR lower bounds and bandwidths; 5-10: latency strings.
468 u"ndr_low": float(groups.group(1)),
469 u"ndr_low_b": float(groups.group(2)),
470 u"pdr_low": float(groups.group(3)),
471 u"pdr_low_b": float(groups.group(4)),
472 u"pdr_lat_90_1": groups.group(5),
473 u"pdr_lat_90_2": groups.group(6),
474 u"pdr_lat_50_1": groups.group(7),
475 u"pdr_lat_50_2": groups.group(8),
476 u"pdr_lat_10_1": groups.group(9),
477 u"pdr_lat_10_2": groups.group(10),
479 except (AttributeError, IndexError, ValueError, KeyError):
480 return u"Test Failed."
# Nested helper: turns two "min/avg/max/hdrh" strings into percentile values
# decoded from the base64-encoded HDR histogram payload.
482 def _process_lat(in_str_1, in_str_2):
483 """Extract min, avg, max values from latency string.
485 :param in_str_1: Latency string for one direction produced by robot
487 :param in_str_2: Latency string for second direction produced by
491 :returns: Processed latency string or None if a problem occurs.
494 in_list_1 = in_str_1.split('/', 3)
495 in_list_2 = in_str_2.split('/', 3)
497 if len(in_list_1) != 4 and len(in_list_2) != 4:
# NOTE(review): base64 pad of len%4 looks suspicious — padding is usually
# (-len) % 4. Verify against the producer of these strings.
500 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
502 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
503 except hdrh.codec.HdrLengthException:
506 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
508 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
509 except hdrh.codec.HdrLengthException:
512 if hdr_lat_1 and hdr_lat_2:
514 hdr_lat_1.get_value_at_percentile(50.0),
515 hdr_lat_1.get_value_at_percentile(90.0),
516 hdr_lat_1.get_value_at_percentile(99.0),
517 hdr_lat_2.get_value_at_percentile(50.0),
518 hdr_lat_2.get_value_at_percentile(90.0),
519 hdr_lat_2.get_value_at_percentile(99.0)
529 f"1. {(data[u'ndr_low'] / 1e6):5.2f} "
530 f"{data[u'ndr_low_b']:5.2f}"
531 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f} "
532 f"{data[u'pdr_low_b']:5.2f}"
535 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
536 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
537 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
# Column width is derived from the widest latency value, minimum 4.
540 max_len = len(str(max((max(item) for item in latency))))
541 max_len = 4 if max_len < 4 else max_len
543 for idx, lat in enumerate(latency):
# NOTE(review): the format spec ":68,076d" below looks like extraction
# garbling of ":{max_len}d" (max_len is computed above but never used
# otherwise) — confirm against the original file before trusting this text.
548 f"{lat[0]:68,076d} "
549 f"{lat[1]:68,076d} "
550 f"{lat[2]:68,076d} "
551 f"{lat[3]:68,076d} "
552 f"{lat[4]:68,076d} "
553 f"{lat[5]:68,076d} "
558 except (AttributeError, IndexError, ValueError, KeyError):
559 return u"Test Failed."
# Parser for the "testbed" message type: extracts the TG node IP address
# and stores it in metadata. NOTE(review): the try: line (574) and the
# fallback assignment in the except branch (577-578) are elided here.
561 def _get_testbed(self, msg):
562 """Called when extraction of testbed IP is required.
563 The testbed is identified by TG node IP address.
565 :param msg: Message to process.
570 if msg.message.count(u"Setup of TG node") or \
571 msg.message.count(u"Setup of node TG host"):
572 reg_tg_ip = re.compile(
573 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
575 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
576 except (KeyError, ValueError, IndexError, AttributeError):
579 self._data[u"metadata"][u"testbed"] = self._testbed
# Stop dispatching further messages to this parser.
580 self._msg_type = None
# Parser for the "vpp-version" message type. NOTE(review): line 593
# (the "self._version = ..." assignment head) is elided from this excerpt.
582 def _get_vpp_version(self, msg):
583 """Called when extraction of VPP version is required.
585 :param msg: Message to process.
590 if msg.message.count(u"return STDOUT Version:") or \
591 msg.message.count(u"VPP Version:") or \
592 msg.message.count(u"VPP version:"):
594 re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
596 self._data[u"metadata"][u"version"] = self._version
597 self._msg_type = None
# Parser for the "dpdk-version" message type. NOTE(review): the try: line
# (608) and the except branch (612-614) are elided from this excerpt.
599 def _get_dpdk_version(self, msg):
600 """Called when extraction of DPDK version is required.
602 :param msg: Message to process.
607 if msg.message.count(u"DPDK Version:"):
609 self._version = str(re.search(
610 self.REGEX_VERSION_DPDK, msg.message).group(2))
611 self._data[u"metadata"][u"version"] = self._version
615 self._msg_type = None
def _get_timestamp(self, msg):
    """Extract the build timestamp from a message.

    Stores the first 14 characters of the message timestamp as the
    "generated" metadata entry, then clears the message type so no
    further messages are routed to this parser.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    generated = msg.timestamp[:14]
    self._timestamp = generated
    self._data[u"metadata"][u"generated"] = generated
    self._msg_type = None
# Parser for the "teardown-papi-history" message type: appends per-DUT PAPI
# command history to the current test entry. NOTE(review): the re.sub(...)
# call head (line 642) and trailing lines are elided here.
629 def _get_papi_history(self, msg):
630 """Called when extraction of PAPI command history is required.
632 :param msg: Message to process.
636 if msg.message.count(u"PAPI command history:"):
# Counter distinguishes DUT1 (1) from DUT2 (2).
637 self._conf_history_lookup_nr += 1
638 if self._conf_history_lookup_nr == 1:
639 self._data[u"tests"][self._test_id][u"conf-history"] = str()
641 self._msg_type = None
643 r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
647 ).replace(u'"', u"'")
648 self._data[u"tests"][self._test_id][u"conf-history"] += (
649 f"**DUT{str(self._conf_history_lookup_nr)}:** {text}"
# Parser for the "test-show-runtime" message type: stores one "show runtime"
# blob per DUT under the test's "show-run" dict. Heavily elided in this
# excerpt (early returns, except-branch defaults, dict literal closers).
652 def _get_show_run(self, msg):
653 """Called when extraction of VPP operational data (output of CLI command
654 Show Runtime) is required.
656 :param msg: Message to process.
661 if not msg.message.count(u"stats runtime"):
# Only the first show-runtime keyword per test is recorded.
665 if self._sh_run_counter > 1:
668 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
669 self._data[u"tests"][self._test_id][u"show-run"] = dict()
671 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
675 host = groups.group(1)
676 except (AttributeError, IndexError):
679 sock = groups.group(2)
680 except (AttributeError, IndexError):
# DUT key is derived from how many entries already exist (dut1, dut2, ...).
683 dut = u"dut{nr}".format(
684 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
686 self._data[u'tests'][self._test_id][u'show-run'][dut] = \
# Normalise the raw message into a compact JSON-ish string.
691 u"runtime": str(msg.message).replace(u' ', u'').
692 replace(u'\n', u'').replace(u"'", u'"').
693 replace(u'b"', u'"').replace(u'u"', u'"').
# Parser for the "test-telemetry" message type: converts Prometheus-style
# vpp_runtime_* lines into a per-DUT "telemetry-show-run" structure.
# Heavily elided in this excerpt.
698 def _get_telemetry(self, msg):
699 """Called when extraction of VPP telemetry data is required.
701 :param msg: Message to process.
706 if self._telemetry_kw_counter > 1:
708 if not msg.message.count(u"# TYPE vpp_runtime_calls"):
711 if u"telemetry-show-run" not in \
712 self._data[u"tests"][self._test_id].keys():
713 self._data[u"tests"][self._test_id][u"telemetry-show-run"] = dict()
715 self._telemetry_msg_counter += 1
716 groups = re.search(self.REGEX_SH_RUN_HOST, msg.message)
720 host = groups.group(1)
721 except (AttributeError, IndexError):
724 sock = groups.group(2)
725 except (AttributeError, IndexError):
728 u"source_type": u"node",
730 u"msg_type": u"metric",
731 u"log_level": u"INFO",
732 u"timestamp": msg.timestamp,
733 u"msg": u"show_runtime",
738 for line in msg.message.splitlines():
739 if not line.startswith(u"vpp_runtime_"):
# Prometheus sample format: name{labels} value timestamp.
742 params, value, timestamp = line.rsplit(u" ", maxsplit=2)
743 cut = params.index(u"{")
744 name = params[:cut].split(u"_", maxsplit=2)[-1]
# NOTE(review): this string is presumably passed to eval() on an elided line
# (745) to turn the label set into a dict — eval of log text; worth auditing.
746 u"dict" + params[cut:].replace('{', '(').replace('}', ')')
748 labels[u"graph_node"] = labels.pop(u"name")
749 runtime[u"data"].append(
753 u"timestamp": timestamp,
757 except (TypeError, ValueError, IndexError):
759 self._data[u'tests'][self._test_id][u'telemetry-show-run']\
760 [f"dut{self._telemetry_msg_counter}"] = copy.copy(
# Parses NDR/PDR lower and upper packet rates from an NDRPDR test message.
# NOTE(review): status initialisation and the try:/status="PASS" lines
# (776-778, 784, 786, 791, 793-794) are elided from this excerpt; -1.0 is
# the sentinel for "not parsed".
768 def _get_ndrpdr_throughput(self, msg):
769 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
772 :param msg: The test message to be parsed.
774 :returns: Parsed data as a dict and the status (PASS/FAIL).
775 :rtype: tuple(dict, str)
779 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
780 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
783 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
785 if groups is not None:
787 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
788 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
789 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
790 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
792 except (IndexError, ValueError):
795 return throughput, status
# Same as _get_ndrpdr_throughput but parses the Gbps figures via
# REGEX_NDRPDR_GBPS. NOTE(review): the trailing status handling and
# "return gbps, status" (lines 822-825) are elided from this excerpt.
797 def _get_ndrpdr_throughput_gbps(self, msg):
798 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
801 :param msg: The test message to be parsed.
803 :returns: Parsed data as a dict and the status (PASS/FAIL).
804 :rtype: tuple(dict, str)
808 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
809 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
812 groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
814 if groups is not None:
816 gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
817 gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
818 gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
819 gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
821 except (IndexError, ValueError):
# Parses PLRsearch lower/upper bounds from a SOAK test message.
# NOTE(review): the throughput dict initialiser and status lines
# (834-840, 842, 844, 847, 849-850) are elided from this excerpt.
826 def _get_plr_throughput(self, msg):
827 """Get PLRsearch lower bound and PLRsearch upper bound from the test
830 :param msg: The test message to be parsed.
832 :returns: Parsed data as a dict and the status (PASS/FAIL).
833 :rtype: tuple(dict, str)
841 groups = re.search(self.REGEX_PLR_RATE, msg)
843 if groups is not None:
845 throughput[u"LOWER"] = float(groups.group(1))
846 throughput[u"UPPER"] = float(groups.group(2))
848 except (IndexError, ValueError):
851 return throughput, status
# Parses the latency section of an NDRPDR message. Prefers the full 12-group
# REGEX_NDRPDR_LAT; falls back to the 4-group REGEX_NDRPDR_LAT_BASE (CPS/PPS
# tests). Elided here: the latency_default/latency dict skeleton (860-868,
# 871-872, ...) and the try: lines around the group assignments.
853 def _get_ndrpdr_latency(self, msg):
854 """Get LATENCY from the test message.
856 :param msg: The test message to be parsed.
858 :returns: Parsed data as a dict and the status (PASS/FAIL).
859 :rtype: tuple(dict, str)
869 u"direction1": copy.copy(latency_default),
870 u"direction2": copy.copy(latency_default)
873 u"direction1": copy.copy(latency_default),
874 u"direction2": copy.copy(latency_default)
877 u"direction1": copy.copy(latency_default),
878 u"direction2": copy.copy(latency_default)
881 u"direction1": copy.copy(latency_default),
882 u"direction2": copy.copy(latency_default)
885 u"direction1": copy.copy(latency_default),
886 u"direction2": copy.copy(latency_default)
889 u"direction1": copy.copy(latency_default),
890 u"direction2": copy.copy(latency_default)
894 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
896 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
898 return latency, u"FAIL"
# Nested helper: "min/avg/max/hdrh" -> dict of floats (+ raw hdrh string).
900 def process_latency(in_str):
901 """Return object with parsed latency values.
903 TODO: Define class for the return type.
905 :param in_str: Input string, min/avg/max/hdrh format.
907 :returns: Dict with corresponding keys, except hdrh float values.
909 :throws IndexError: If in_str does not have enough substrings.
910 :throws ValueError: If a substring does not convert to float.
912 in_list = in_str.split('/', 3)
915 u"min": float(in_list[0]),
916 u"avg": float(in_list[1]),
917 u"max": float(in_list[2]),
921 if len(in_list) == 4:
922 rval[u"hdrh"] = str(in_list[3])
# Groups 1-4 are always NDR/PDR per-direction latencies.
927 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
928 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
929 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
930 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
931 if groups.lastindex == 4:
932 return latency, u"PASS"
933 except (IndexError, ValueError):
# Groups 5-12 exist only for the full-latency variant of the message.
937 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
938 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
939 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
940 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
941 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
942 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
943 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
944 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
945 if groups.lastindex == 12:
946 return latency, u"PASS"
947 except (IndexError, ValueError):
950 return latency, u"FAIL"
# Parses hoststack test results (JSON embedded in the message). No self
# parameter — presumably decorated with @staticmethod on the elided line 952.
# Elided here: result/status initialisers, try: lines, and status updates.
953 def _get_hoststack_data(msg, tags):
954 """Get data from the hoststack test message.
956 :param msg: The test message to be parsed.
957 :param tags: Test tags.
960 :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
961 :rtype: tuple(dict, str)
# Normalise quotes/whitespace so the payload parses as JSON.
966 msg = msg.replace(u"'", u'"').replace(u" ", u"")
967 if u"LDPRELOAD" in tags:
971 except JSONDecodeError:
973 elif u"VPPECHO" in tags:
# VPPECHO emits two concatenated JSON objects: client then server.
975 msg_lst = msg.replace(u"}{", u"} {").split(u" ")
977 client=loads(msg_lst[0]),
978 server=loads(msg_lst[1])
981 except (JSONDecodeError, IndexError):
984 return result, status
# Parses VSAP (ab benchmark) results via REGEX_VSAP_MSG_INFO. Elided here:
# result/status initialisers, the try: line, and status updates.
986 def _get_vsap_data(self, msg, tags):
987 """Get data from the vsap test message.
989 :param msg: The test message to be parsed.
990 :param tags: Test tags.
993 :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
994 :rtype: tuple(dict, str)
999 groups = re.search(self.REGEX_VSAP_MSG_INFO, msg)
1000 if groups is not None:
# Transfer rate is reported in kbps in the message; scale to bps.
1002 result[u"transfer-rate"] = float(groups.group(1)) * 1e3
1003 result[u"latency"] = float(groups.group(2))
1004 result[u"completed-requests"] = int(groups.group(3))
1005 result[u"failed-requests"] = int(groups.group(4))
1006 result[u"bytes-transferred"] = int(groups.group(5))
# NOTE(review): missing space in u"TCP_CPS"in — legal Python, but fix the
# spacing when this file is next touched.
1007 if u"TCP_CPS"in tags:
1008 result[u"cps"] = float(groups.group(6))
1009 elif u"TCP_RPS" in tags:
1010 result[u"rps"] = float(groups.group(6))
1012 return result, status
1014 except (IndexError, ValueError):
1017 return result, status
def visit_suite(self, suite):
    """Traverse a suite and its direct children.

    Skips the subtree entirely when start_suite() vetoes it by
    returning False.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
    if self.start_suite(suite) is False:
        return
    suite.suites.visit(self)
    suite.tests.visit(self)
    self.end_suite(suite)
# Registers the suite in self._data["suites"] keyed by a normalised long
# name, then visits the suite's keywords. Elided here: the try: line (1039),
# the except-branch default for parent_name, and the "doc" entry (1048).
1031 def start_suite(self, suite):
1032 """Called when suite starts.
1034 :param suite: Suite to process.
1040 parent_name = suite.parent.name
1041 except AttributeError:
# Key: lowercase long name with quotes and spaces normalised.
1044 self._data[u"suites"][suite.longname.lower().
1045 replace(u'"', u"'").
1046 replace(u" ", u"_")] = {
1047 u"name": suite.name.lower(),
1049 u"parent": parent_name,
# Depth in the suite hierarchy, counted from dot-separated long name parts.
1050 u"level": len(suite.longname.split(u"."))
1053 suite.keywords.visit(self)
# Suite-end hook; body (a no-op in this visitor, presumably) is elided here.
1055 def end_suite(self, suite):
1056 """Called when suite ends.
1058 :param suite: Suite to process.
# Traverses one test: start_test() may veto; keywords are then visited.
# NOTE(review): the closing self.end_test(test) call (lines 1072-1073)
# is elided from this excerpt.
1063 def visit_test(self, test):
1064 """Implements traversing through the test.
1066 :param test: Test to process.
1070 if self.start_test(test) is not False:
1071 test.keywords.visit(self)
# The core per-test extraction: builds a test_result dict (name, tags, type,
# parsed results) and stores it under self._data["tests"][test_id].
# Heavily elided in this excerpt (return statements, else: lines, closers).
1074 def start_test(self, test):
1075 """Called when test starts.
1077 :param test: Test to process.
# Reset per-test parser guards.
1082 self._sh_run_counter = 0
1083 self._telemetry_kw_counter = 0
1084 self._telemetry_msg_counter = 0
1086 longname_orig = test.longname.lower()
1088 # Check the ignore list
1089 if longname_orig in self._ignore:
1092 tags = [str(tag) for tag in test.tags]
1093 test_result = dict()
1095 # Change the TC long name and name if defined in the mapping table
1096 longname = self._mapping.get(longname_orig, None)
1097 if longname is not None:
1098 name = longname.split(u'.')[-1]
1100 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1104 longname = longname_orig
1105 name = test.name.lower()
1107 # Remove TC number from the TC long name (backward compatibility):
1108 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1109 # Remove TC number from the TC name (not needed):
1110 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
1112 test_result[u"parent"] = test.parent.name.lower()
1113 test_result[u"tags"] = tags
# NOTE(review): "doc" lacks the u-prefix used everywhere else — cosmetic
# inconsistency only (identical at runtime in py3).
1114 test_result["doc"] = test.doc
1115 test_result[u"type"] = u""
1116 test_result[u"status"] = test.status
1117 test_result[u"starttime"] = test.starttime
1118 test_result[u"endtime"] = test.endtime
# Build the human-readable "msg" according to the test type.
1120 if test.status == u"PASS":
1121 if u"NDRPDR" in tags:
1122 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1123 test_result[u"msg"] = self._get_data_from_pps_test_msg(
1125 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1126 test_result[u"msg"] = self._get_data_from_cps_test_msg(
1129 test_result[u"msg"] = self._get_data_from_perf_test_msg(
1131 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1132 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1135 test_result[u"msg"] = test.message
1137 test_result[u"msg"] = test.message
1139 if u"PERFTEST" in tags:
1140 # Replace info about cores (e.g. -1c-) with the info about threads
1141 # and cores (e.g. -1t1c-) in the long test case names and in the
1142 # test case names if necessary.
1143 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
1147 for tag in test_result[u"tags"]:
1148 groups = re.search(self.REGEX_TC_TAG, tag)
1154 self._test_id = re.sub(
1155 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1156 self._test_id, count=1
1158 test_result[u"name"] = re.sub(
1159 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1160 test_result["name"], count=1
# A test without exactly one multi-threading tag is marked FAIL.
1163 test_result[u"status"] = u"FAIL"
1164 self._data[u"tests"][self._test_id] = test_result
1166 f"The test {self._test_id} has no or more than one "
1167 f"multi-threading tags.\n"
1168 f"Tags: {test_result[u'tags']}"
# Classify the test and parse type-specific results.
1172 if u"DEVICETEST" in tags:
1173 test_result[u"type"] = u"DEVICETEST"
1174 elif u"NDRPDR" in tags:
1175 if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1176 test_result[u"type"] = u"CPS"
1178 test_result[u"type"] = u"NDRPDR"
1179 if test.status == u"PASS":
1180 test_result[u"throughput"], test_result[u"status"] = \
1181 self._get_ndrpdr_throughput(test.message)
1182 test_result[u"gbps"], test_result[u"status"] = \
1183 self._get_ndrpdr_throughput_gbps(test.message)
1184 test_result[u"latency"], test_result[u"status"] = \
1185 self._get_ndrpdr_latency(test.message)
1186 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1188 test_result[u"type"] = u"MRR"
1190 test_result[u"type"] = u"BMRR"
1191 if test.status == u"PASS":
1192 test_result[u"result"] = dict()
1193 groups = re.search(self.REGEX_BMRR, test.message)
1194 if groups is not None:
1195 items_str = groups.group(1)
1197 float(item.strip().replace(u"'", u""))
1198 for item in items_str.split(",")
1200 # Use whole list in CSIT-1180.
1201 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1202 test_result[u"result"][u"samples"] = items_float
1203 test_result[u"result"][u"receive-rate"] = stats.avg
1204 test_result[u"result"][u"receive-stdev"] = stats.stdev
# Legacy MRR format: rx packets divided by trial duration.
1206 groups = re.search(self.REGEX_MRR, test.message)
1207 test_result[u"result"][u"receive-rate"] = \
1208 float(groups.group(3)) / float(groups.group(1))
1209 elif u"SOAK" in tags:
1210 test_result[u"type"] = u"SOAK"
1211 if test.status == u"PASS":
1212 test_result[u"throughput"], test_result[u"status"] = \
1213 self._get_plr_throughput(test.message)
1214 elif u"HOSTSTACK" in tags:
1215 test_result[u"type"] = u"HOSTSTACK"
1216 if test.status == u"PASS":
1217 test_result[u"result"], test_result[u"status"] = \
1218 self._get_hoststack_data(test.message, tags)
1219 elif u"LDP_NGINX" in tags:
1220 test_result[u"type"] = u"LDP_NGINX"
1221 test_result[u"result"], test_result[u"status"] = \
1222 self._get_vsap_data(test.message, tags)
1223 # elif u"TCP" in tags: # This might be not used
1224 # test_result[u"type"] = u"TCP"
1225 # if test.status == u"PASS":
1226 # groups = re.search(self.REGEX_TCP, test.message)
1227 # test_result[u"result"] = int(groups.group(2))
1228 elif u"RECONF" in tags:
1229 test_result[u"type"] = u"RECONF"
1230 if test.status == u"PASS":
1231 test_result[u"result"] = None
1233 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1234 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1235 test_result[u"result"] = {
1236 u"loss": int(grps_loss.group(1)),
1237 u"time": float(grps_time.group(1))
1239 except (AttributeError, IndexError, ValueError, TypeError):
1240 test_result[u"status"] = u"FAIL"
1242 test_result[u"status"] = u"FAIL"
1244 self._data[u"tests"][self._test_id] = test_result
# Test-end hook; body (a no-op in this visitor, presumably) is elided here.
1246 def end_test(self, test):
1247 """Called when test ends.
1249 :param test: Test to process.
def visit_keyword(self, keyword):
    """Visit one keyword, honouring the start/end hook protocol.

    Child traversal is delegated to start_keyword(), which dispatches
    to the setup/test/teardown specific visitors.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    if self.start_keyword(keyword) is False:
        return
    self.end_keyword(keyword)
# Dispatches a top-level keyword to the setup/teardown/test sub-visitors by
# its type. NOTE(review): the try: line (1271), the else: before the test
# branch (1276) and the except body (1279-1280) are elided here.
1264 def start_keyword(self, keyword):
1265 """Called when keyword starts. Default implementation does nothing.
1267 :param keyword: Keyword to process.
1268 :type keyword: Keyword
1272 if keyword.type == u"setup":
1273 self.visit_setup_kw(keyword)
1274 elif keyword.type == u"teardown":
1275 self.visit_teardown_kw(keyword)
1277 self.visit_test_kw(keyword)
1278 except AttributeError:
# Keyword-end hook; body (a no-op, per the docstring) is elided here.
1281 def end_keyword(self, keyword):
1282 """Called when keyword ends. Default implementation does nothing.
1284 :param keyword: Keyword to process.
1285 :type keyword: Keyword
def visit_test_kw(self, test_kw):
    """Recursively traverse a test keyword and its child keywords.

    Each child is offered to start_test_kw(); a False return skips
    that child's subtree.

    :param test_kw: Keyword to process.
    :type test_kw: Keyword
    :returns: Nothing.
    """
    for child in test_kw.keywords:
        if self.start_test_kw(child) is False:
            continue
        self.visit_test_kw(child)
        self.end_test_kw(child)
# Selects the message parser for test-body keywords (telemetry vs show-run).
# NOTE(review): the body of the "trending" early branch (1311-1312), an
# else/return (1319-1320) are elided from this excerpt.
1302 def start_test_kw(self, test_kw):
1303 """Called when test keyword starts. Default implementation does
1306 :param test_kw: Keyword to process.
1307 :type test_kw: Keyword
1310 if self._for_output == u"trending":
1313 if test_kw.name.count(u"Run Telemetry On All Duts"):
1314 self._msg_type = u"test-telemetry"
1315 self._telemetry_kw_counter += 1
1316 elif test_kw.name.count(u"Show Runtime On All Duts"):
1317 self._msg_type = u"test-show-runtime"
1318 self._sh_run_counter += 1
1321 test_kw.messages.visit(self)
# Test-keyword-end hook; body (a no-op, per the docstring) is elided here.
1323 def end_test_kw(self, test_kw):
1324 """Called when keyword ends. Default implementation does nothing.
1326 :param test_kw: Keyword to process.
1327 :type test_kw: Keyword
def visit_setup_kw(self, setup_kw):
    """Recursively traverse a setup keyword and its child keywords.

    (The original docstring said "teardown"; this visitor handles the
    setup branch.)

    :param setup_kw: Keyword to process.
    :type setup_kw: Keyword
    :returns: Nothing.
    """
    for child in setup_kw.keywords:
        if self.start_setup_kw(child) is False:
            continue
        self.visit_setup_kw(child)
        self.end_setup_kw(child)
# Selects the message parser for setup keywords (VPP/DPDK version,
# timestamp, testbed). NOTE(review): line 1356 (the second half of the
# DPDK condition, presumably "not self._version:") and lines 1363-1364
# are elided from this excerpt.
1344 def start_setup_kw(self, setup_kw):
# Docstring fixed: this is the setup (not teardown) keyword hook.
1345 """Called when setup keyword starts. Default implementation does
1348 :param setup_kw: Keyword to process.
1349 :type setup_kw: Keyword
1352 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1353 and not self._version:
1354 self._msg_type = u"vpp-version"
1355 elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1357 self._msg_type = u"dpdk-version"
1358 elif setup_kw.name.count(u"Set Global Variable") \
1359 and not self._timestamp:
1360 self._msg_type = u"timestamp"
1361 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1362 self._msg_type = u"testbed"
1365 setup_kw.messages.visit(self)
# Setup-keyword-end hook; body (a no-op, per the docstring) is elided here.
1367 def end_setup_kw(self, setup_kw):
1368 """Called when keyword ends. Default implementation does nothing.
1370 :param setup_kw: Keyword to process.
1371 :type setup_kw: Keyword
# Recursively traverses a teardown keyword and its children, calling the
# start/end hooks around each child.
1375 def visit_teardown_kw(self, teardown_kw):
1376 """Implements traversing through the teardown keyword and its child
1379 :param teardown_kw: Keyword to process.
1380 :type teardown_kw: Keyword
1383 for keyword in teardown_kw.keywords:
# A start hook returning False would prune this subtree from traversal.
1384 if self.start_teardown_kw(keyword) is not False:
1385 self.visit_teardown_kw(keyword)
1386 self.end_teardown_kw(keyword)
# Hook run when a teardown keyword starts: when the PAPI-history keyword is
# seen, resets the history lookup counter and switches the message parser
# to "teardown-papi-history" mode, then visits the keyword's messages.
1388 def start_teardown_kw(self, teardown_kw):
1389 """Called when teardown keyword starts
1391 :param teardown_kw: Keyword to process.
1392 :type teardown_kw: Keyword
1395 if teardown_kw.name.count(u"Show Papi History On All Duts"):
1396 self._conf_history_lookup_nr = 0
1397 self._msg_type = u"teardown-papi-history"
1398 teardown_kw.messages.visit(self)
# Hook run when a teardown keyword ends; no-op by design (body not visible
# in this gapped listing, presumably a bare pass).
1400 def end_teardown_kw(self, teardown_kw):
1401 """Called when keyword ends. Default implementation does nothing.
1403 :param teardown_kw: Keyword to process.
1404 :type teardown_kw: Keyword
# Visitor entry point for a single message: wraps start/end hooks; a start
# hook returning False suppresses the end hook.
1408 def visit_message(self, msg):
1409 """Implements visiting the message.
1411 :param msg: Message to process.
1415 if self.start_message(msg) is not False:
1416 self.end_message(msg)
# Hook run when a message starts: dispatches the message to the parser
# function registered for the current self._msg_type in self.parse_msg
# (the dispatch table itself is defined elsewhere in this class).
1418 def start_message(self, msg):
1419 """Called when message starts. Get required information from messages:
1422 :param msg: Message to process.
1427 self.parse_msg[self._msg_type](msg)
# Hook run when a message ends; no-op by design (body not visible in this
# gapped listing, presumably a bare pass).
1429 def end_message(self, msg):
1430 """Called when message ends. Default implementation does nothing.
1432 :param msg: Message to process.
1441 The data is extracted from output.xml files generated by Jenkins jobs and
1442 stored in pandas' DataFrames.
1448 (as described in ExecutionChecker documentation)
1450 (as described in ExecutionChecker documentation)
1452 (as described in ExecutionChecker documentation)
# Constructor: stores the requested output kind and creates the empty
# pandas Series that will accumulate parsed data per job/build.
# NOTE(review): gapped listing — original lines 1462-1466 and 1468-1469
# are not visible here; presumably the `spec` argument is stored there
# (self._cfg is read by other methods) — confirm against the full source.
1455 def __init__(self, spec, for_output):
1458 :param spec: Specification.
1459 :param for_output: Output to be generated from downloaded data.
1460 :type spec: Specification
1461 :type for_output: str
1467 self._for_output = for_output
# Top-level container: indexed by job name, then by build number (str).
1470 self._input_data = pd.Series()
# Read-only accessor for the accumulated input data.
# NOTE(review): gapped listing — the `def` line (and presumably a
# @property decorator, given other methods call `self.data[...]`) is not
# visible here; confirm against the full source.
1474 """Getter - Input data.
1476 :returns: Input data
1477 :rtype: pandas.Series
1479 return self._input_data
# Accessor for the "metadata" sub-series of one job/build.
# NOTE(review): indexes with the raw `build` value, while suites() below
# uses str(build) — looks inconsistent; verify which form callers pass.
1481 def metadata(self, job, build):
1482 """Getter - metadata
1484 :param job: Job which metadata we want.
1485 :param build: Build which metadata we want.
1489 :rtype: pandas.Series
1491 return self.data[job][build][u"metadata"]
# Accessor for the "suites" sub-series of one job/build; the build key is
# normalised to str here (unlike metadata()/tests() in this listing).
1493 def suites(self, job, build):
1496 :param job: Job which suites we want.
1497 :param build: Build which suites we want.
1501 :rtype: pandas.Series
1503 return self.data[job][str(build)][u"suites"]
# Accessor for the "tests" sub-series of one job/build.
# NOTE(review): indexes with the raw `build` value, not str(build) as
# suites() does — verify which form callers pass.
1505 def tests(self, job, build):
1508 :param job: Job which tests we want.
1509 :param build: Build which tests we want.
1513 :rtype: pandas.Series
1515 return self.data[job][build][u"tests"]
# Parses one robot output.xml file into the JSON-like structure produced
# by ExecutionChecker (see that class's docstring for the layout).
# NOTE(review): gapped listing — original lines 1526-1533, 1535, 1538,
# 1540-1541, 1544 and 1546+ are not visible (among them, presumably the
# enclosing try:, the `metadata` construction, the error-path return and
# the final return of checker's data); confirm against the full source.
1517 def _parse_tests(self, job, build):
1518 """Process data from robot output.xml file and return JSON structured
1521 :param job: The name of job which build output data will be processed.
1522 :param build: The build which output data will be processed.
1525 :returns: JSON data structure.
1534 with open(build[u"file-name"], u'r') as data_file:
# Robot Framework parses the whole result file here; a malformed file
# raises robot.errors.DataError, handled just below.
1536 result = ExecutionResult(data_file)
1537 except errors.DataError as err:
1539 f"Error occurred while parsing output.xml: {repr(err)}"
# Walk the parsed result tree with the checker visitor, which fills in
# the metadata/suites/tests structure as it goes.
1542 checker = ExecutionChecker(
1543 metadata, self._cfg.mapping, self._cfg.ignore, self._for_output
1545 result.visit(checker)
# Downloads one build's input file (retrying per `repeat`), parses it via
# _parse_tests(), deletes the local file, and optionally discards data
# older than the configured "time-period".
# NOTE(review): gapped listing — many original lines (1551, 1558-1564,
# 1566-1571, 1573-1577, 1580-1582, 1585-1586, 1589-1590, 1592-1593, 1596,
# 1598-1599, 1602, 1605, 1608, 1610, 1614-1617, 1620-1628) are not
# visible, including the retry loop, state handling and the return value;
# confirm details against the full source.
1549 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1550 """Download and parse the input data file.
1552 :param pid: PID of the process executing this method.
1553 :param job: Name of the Jenkins job which generated the processed input
1555 :param build: Information about the Jenkins build which generated the
1556 processed input file.
1557 :param repeat: Repeat the download specified number of times if not
1565 logging.info(f"Processing the job/build: {job}: {build[u'build']}")
1572 success = download_and_unzip_data_file(self._cfg, job, build, pid)
1578 f"It is not possible to download the input data file from the "
1579 f"job {job}, build {build[u'build']}, or it is damaged. "
1583 logging.info(f" Processing data from build {build[u'build']}")
1584 data = self._parse_tests(job, build)
1587 f"Input data file from the job {job}, build "
1588 f"{build[u'build']} is damaged. Skipped."
1591 state = u"processed"
# Best effort cleanup of the downloaded file; failure is only logged.
1594 remove(build[u"file-name"])
1595 except OSError as err:
1597 f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1600 # If the time-period is defined in the specification file, remove all
1601 # files which are outside the time period.
1603 timeperiod = self._cfg.environment.get(u"time-period", None)
1604 if timeperiod and data:
# "time-period" is interpreted as a number of days (timedelta(int(...))).
1606 timeperiod = timedelta(int(timeperiod))
1607 metadata = data.get(u"metadata", None)
1609 generated = metadata.get(u"generated", None)
# The "generated" timestamp format must match ExecutionChecker's output.
1611 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1612 if (now - generated) > timeperiod:
1613 # Remove the data and the file:
1618 f" The build {job}/{build[u'build']} is "
1619 f"outdated, will be removed."
# Top-level driver: downloads and parses every configured job/build, packs
# each result into a per-build pandas Series stored in self._input_data,
# records file name and state back into the configuration, and logs a
# summary of successful downloads per data source.
# NOTE(review): gapped listing — original lines 1632, 1634-1637, 1639,
# 1642, 1644-1645, 1647-1648, 1654, 1658, 1662-1664, 1670, 1672-1673,
# 1676, 1678, 1682, 1686-1688 are not visible here (error handling and the
# closing of the nested pd.Series(...) literal among them).
1629 def download_and_parse_data(self, repeat=1):
1630 """Download the input data files, parse input data from input files and
1631 store in pandas' Series.
1633 :param repeat: Repeat the download specified number of times if not
1638 logging.info(u"Downloading and parsing input files ...")
1640 for job, builds in self._cfg.input.items():
1641 for build in builds:
1643 result = self._download_and_parse_build(job, build, repeat)
1646 build_nr = result[u"build"][u"build"]
1649 data = result[u"data"]
# Re-pack each top-level section (metadata/suites/tests) of the parsed
# dict into its own pandas Series, keyed by the original dict keys.
1650 build_data = pd.Series({
1651 u"metadata": pd.Series(
1652 list(data[u"metadata"].values()),
1653 index=list(data[u"metadata"].keys())
1655 u"suites": pd.Series(
1656 list(data[u"suites"].values()),
1657 index=list(data[u"suites"].keys())
1659 u"tests": pd.Series(
1660 list(data[u"tests"].values()),
1661 index=list(data[u"tests"].keys())
# Lazily create the per-job Series; build numbers are stored as str keys.
1665 if self._input_data.get(job, None) is None:
1666 self._input_data[job] = pd.Series()
1667 self._input_data[job][str(build_nr)] = build_data
1668 self._cfg.set_input_file_name(
1669 job, build_nr, result[u"build"][u"file-name"]
1671 self._cfg.set_input_state(job, build_nr, result[u"state"])
# ru_maxrss is divided by 1000 to report (roughly) megabytes.
1674 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1675 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1677 logging.info(u"Done.")
# NOTE(review): f-string below has no placeholder — harmless, but a plain
# string literal would do.
1679 msg = f"Successful downloads from the sources:\n"
1680 for source in self._cfg.environment[u"data-sources"]:
1681 if source[u"successful-downloads"]:
1683 f"{source[u'url']}/{source[u'path']}/"
1684 f"{source[u'file-name']}: "
1685 f"{source[u'successful-downloads']}\n"
# Parses a single local output.xml file as job/build data, mirroring the
# packing done by download_and_parse_data() for downloaded builds.
# NOTE(review): gapped listing — original lines 1690, 1692, 1698,
# 1700-1704, 1707-1708, 1711-1714, 1717-1718, 1721, 1724, 1727-1728,
# 1733, 1737, 1741-1743, 1747 are not visible (among them the signature's
# trailing parameters, the try: around build_nr parsing, the build dict
# opener, and the `if replace:` guard); confirm against the full source.
1689 def process_local_file(self, local_file, job=u"local", build_nr=1,
1691 """Process local XML file given as a command-line parameter.
1693 :param local_file: The file to process.
1694 :param job: Job name.
1695 :param build_nr: Build number.
1696 :param replace: If True, the information about jobs and builds is
1697 replaced by the new one, otherwise the new jobs and builds are
1699 :type local_file: str
1703 :raises: PresentationError if an error occurs.
1705 if not isfile(local_file):
1706 raise PresentationError(f"The file {local_file} does not exist.")
# Derive the build number from the file name stem (e.g. "123.xml" -> 123);
# falls back (in lines not visible here) when that fails.
1709 build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1710 except (IndexError, ValueError):
1715 u"status": u"failed",
1716 u"file-name": local_file
1719 self._cfg.input = dict()
1720 self._cfg.add_build(job, build)
1722 logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1723 data = self._parse_tests(job, build)
1725 raise PresentationError(
1726 f"Error occurred while parsing the file {local_file}"
# Same metadata/suites/tests re-packing as download_and_parse_data().
1729 build_data = pd.Series({
1730 u"metadata": pd.Series(
1731 list(data[u"metadata"].values()),
1732 index=list(data[u"metadata"].keys())
1734 u"suites": pd.Series(
1735 list(data[u"suites"].values()),
1736 index=list(data[u"suites"].keys())
1738 u"tests": pd.Series(
1739 list(data[u"tests"].values()),
1740 index=list(data[u"tests"].keys())
1744 if self._input_data.get(job, None) is None:
1745 self._input_data[job] = pd.Series()
1746 self._input_data[job][str(build_nr)] = build_data
1748 self._cfg.set_input_state(job, build_nr, u"processed")
# Processes a local directory of XML files: a flat directory of files is
# one job with its files as builds; a directory of sub-directories maps
# each sub-directory to a job. Mixing files and directories is rejected.
# NOTE(review): gapped listing — original lines 1755-1756, 1760,
# 1762-1763, 1767-1768, 1771, 1773-1774, 1776, 1778-1779, 1781-1782,
# 1786, 1790-1791, 1793, 1796, 1801-1803, 1805 are not visible (the
# `local_builds = {` opener, list-comprehension opener, the bare `else:`
# and the `if replace:` guard among them).
1750 def process_local_directory(self, local_dir, replace=True):
1751 """Process local directory with XML file(s). The directory is processed
1752 as a 'job' and the XML files in it as builds.
1753 If the given directory contains only sub-directories, these
1754 sub-directories processed as jobs and corresponding XML files as builds
1757 :param local_dir: Local directory to process.
1758 :param replace: If True, the information about jobs and builds is
1759 replaced by the new one, otherwise the new jobs and builds are
1761 :type local_dir: str
1764 if not isdir(local_dir):
1765 raise PresentationError(
1766 f"The directory {local_dir} does not exist."
1769 # Check if the given directory includes only files, or only directories
# os.walk yields (dirpath, dirnames, filenames); only the first level of
# the tree is inspected here.
1770 _, dirnames, filenames = next(walk(local_dir))
1772 if filenames and not dirnames:
1775 # key: dir (job) name, value: list of file names (builds)
1777 local_dir: [join(local_dir, name) for name in filenames]
1780 elif dirnames and not filenames:
1783 # key: dir (job) name, value: list of file names (builds)
1784 local_builds = dict()
1785 for dirname in dirnames:
# Collect only regular files of each sub-directory as its builds.
1787 join(local_dir, dirname, name)
1788 for name in listdir(join(local_dir, dirname))
1789 if isfile(join(local_dir, dirname, name))
# Sort so that build numbering (idx + 1 below) is deterministic.
1792 local_builds[dirname] = sorted(builds)
1794 elif not filenames and not dirnames:
1795 raise PresentationError(f"The directory {local_dir} is empty.")
1797 raise PresentationError(
1798 f"The directory {local_dir} can include only files or only "
1799 f"directories, not both.\nThe directory {local_dir} includes "
1800 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1804 self._cfg.input = dict()
# replace=False here: the replacement (if any) was already done above.
1806 for job, files in local_builds.items():
1807 for idx, local_file in enumerate(files):
1808 self.process_local_file(local_file, job, idx + 1, replace=False)
# Finds the closing quote of a tag token inside a filter string: locates
# the first `closer` at/after `start` (the opener) and returns the index
# of the next `closer` after it.
# NOTE(review): gapped listing — original lines 1821-1823 are not visible;
# str.index raises ValueError when `closer` is absent, and the missing
# lines presumably wrap these calls in try/except — confirm against the
# full source (the caller _condition() appears to rely on that).
1811 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1812 """Return the index of character in the string which is the end of tag.
1814 :param tag_filter: The string where the end of tag is being searched.
1815 :param start: The index where the searching is stated.
1816 :param closer: The character which is the tag closer.
1817 :type tag_filter: str
1820 :returns: The index of the tag closer.
1824 idx_opener = tag_filter.index(closer, start)
1825 return tag_filter.index(closer, idx_opener + 1)
# Transforms a tag-filter expression into a Python boolean expression by
# inserting " in tags" after each quoted tag token; the result is later
# evaluated (see filter_data) against each test's tag set.
# NOTE(review): gapped listing — original lines 1836-1839, 1841-1843 and
# the final return are not visible (presumably the loop over tag tokens
# and the loop-exit condition); confirm against the full source.
1830 def _condition(tag_filter):
1831 """Create a conditional statement from the given tag filter.
1833 :param tag_filter: Filter based on tags from the element specification.
1834 :type tag_filter: str
1835 :returns: Conditional statement which can be evaluated.
1840 index = InputData._end_of_tag(tag_filter, index)
1844 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
# Filters the parsed data by a tag expression from the element spec,
# returning a nested Series job -> build -> test_id -> (selected params).
# NOTE(review): gapped listing — many original lines (1849-1852,
# 1854-1858, 1860-1865, 1870, 1872, 1876-1878, 1882-1884, 1886,
# 1888-1889, 1892, 1894-1896, 1898, 1900, 1902-1903, 1908-1909, 1911,
# 1913-1915, 1919, 1922, 1924, 1926-1927, 1929-1931, 1933, 1935-1937,
# 1940, 1942, 1945-1947) are not visible, including try: openers, the
# u"all"/u"template" branch body and all return statements; confirm
# details against the full source.
1846 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1847 continue_on_error=False):
1848 """Filter required data from the given jobs and builds.
1850 The output data structure is:
1853 - test (or suite) 1 ID:
1859 - test (or suite) n ID:
1866 :param element: Element which will use the filtered data.
1867 :param params: Parameters which will be included in the output. If None,
1868 all parameters are included.
1869 :param data: If not None, this data is used instead of data specified
1871 :param data_set: The set of data to be filtered: tests, suites,
1873 :param continue_on_error: Continue if there is error while reading the
1874 data. The Item will be empty then
1875 :type element: pandas.Series
1879 :type continue_on_error: bool
1880 :returns: Filtered data.
1881 :rtype pandas.Series
# Suites are not filtered by tags; "all"/"template" filters match
# everything; otherwise compile the tag filter into an eval-able condition.
1885 if data_set == "suites":
1887 elif element[u"filter"] in (u"all", u"template"):
1890 cond = InputData._condition(element[u"filter"])
1891 logging.debug(f" Filter: {cond}")
1893 logging.error(u" No filter defined.")
1897 params = element.get(u"parameters", None)
# "type" and "status" are always carried through when params are given.
1899 params.extend((u"type", u"status"))
1901 data_to_filter = data if data else element[u"data"]
1904 for job, builds in data_to_filter.items():
1905 data[job] = pd.Series()
1906 for build in builds:
1907 data[job][str(build)] = pd.Series()
1910 self.data[job][str(build)][data_set].items())
1912 if continue_on_error:
# Evaluate the compiled tag condition against each test's tags.
# NOTE(review): eval() on spec-supplied filter strings — acceptable only
# because specifications are trusted input; do not feed user data here.
1916 for test_id, test_data in data_dict.items():
1917 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1918 data[job][str(build)][test_id] = pd.Series()
1920 for param, val in test_data.items():
1921 data[job][str(build)][test_id][param] = val
1923 for param in params:
1925 data[job][str(build)][test_id][param] =\
1928 data[job][str(build)][test_id][param] =\
1932 except (KeyError, IndexError, ValueError) as err:
1934 f"Missing mandatory parameter in the element specification: "
1938 except AttributeError as err:
1939 logging.error(repr(err))
1941 except SyntaxError as err:
1943 f"The filter {cond} is not correct. Check if all tags are "
1944 f"enclosed by apostrophes.\n{repr(err)}"
# Filters tests by matching their IDs against regex patterns from the
# element's "include" list (optionally expanded per-core), returning the
# same nested job -> build -> test_id structure as filter_data().
# NOTE(review): gapped listing — many original lines (1951-1967, 1972,
# 1976-1977, 1981-1982, 1984, 1986-1988, 1992, 1994-1996, 1999-2003,
# 2008-2009, 2017, 2020-2021, 2023, 2025-2027, 2033, 2035-2037, 2039,
# 2042-2043, 2046-2048) are not visible, including the try: openers,
# the `tests = include` fallback when no cores are given, and the return
# statements; confirm details against the full source.
1948 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1949 continue_on_error=False):
1950 """Filter required data from the given jobs and builds.
1952 The output data structure is:
1955 - test (or suite) 1 ID:
1961 - test (or suite) n ID:
1968 :param element: Element which will use the filtered data.
1969 :param params: Parameters which will be included in the output. If None,
1970 all parameters are included.
1971 :param data_set: The set of data to be filtered: tests, suites,
1973 :param continue_on_error: Continue if there is error while reading the
1974 data. The Item will be empty then
1975 :type element: pandas.Series
1978 :type continue_on_error: bool
1979 :returns: Filtered data.
1980 :rtype pandas.Series
1983 include = element.get(u"include", None)
1985 logging.warning(u"No tests to include, skipping the element.")
1989 params = element.get(u"parameters", None)
1990 if params and u"type" not in params:
1991 params.append(u"type")
# When "core" values are given, each include pattern is instantiated once
# per core via str.format(core=...).
1993 cores = element.get(u"core", None)
1997 for test in include:
1998 tests.append(test.format(core=core))
2004 for job, builds in element[u"data"].items():
2005 data[job] = pd.Series()
2006 for build in builds:
2007 data[job][str(build)] = pd.Series()
# Case-insensitive regex match of each include pattern against test IDs.
2010 reg_ex = re.compile(str(test).lower())
2011 for test_id in self.data[job][
2012 str(build)][data_set].keys():
2013 if re.match(reg_ex, str(test_id).lower()):
2014 test_data = self.data[job][
2015 str(build)][data_set][test_id]
2016 data[job][str(build)][test_id] = pd.Series()
2018 for param, val in test_data.items():
2019 data[job][str(build)][test_id]\
2022 for param in params:
2024 data[job][str(build)][
2028 data[job][str(build)][
# Tests missing a requested parameter get the sentinel u"No Data".
2029 test_id][param] = u"No Data"
2030 except KeyError as err:
2031 if continue_on_error:
2032 logging.debug(repr(err))
2034 logging.error(repr(err))
2038 except (KeyError, IndexError, ValueError) as err:
2040 f"Missing mandatory parameter in the element "
2041 f"specification: {repr(err)}"
2044 except AttributeError as err:
2045 logging.error(repr(err))
# Flattens the nested job -> build -> item structure into a single Series
# keyed by item ID. When the same item_id occurs in multiple builds, the
# later-visited build silently overwrites the earlier one.
# NOTE(review): gapped listing — original lines 2051, 2053, 2055-2059,
# 2061-2062, 2067-2068, 2070, 2076-2077 are not visible (a @staticmethod
# decorator presumably among them, as is the final return); confirm
# against the full source.
2049 def merge_data(data):
2050 """Merge data from more jobs and builds to a simple data structure.
2052 The output data structure is:
2054 - test (suite) 1 ID:
2060 - test (suite) n ID:
2063 :param data: Data to merge.
2064 :type data: pandas.Series
2065 :returns: Merged data.
2066 :rtype: pandas.Series
2069 logging.info(u"    Merging data ...")
2071 merged_data = pd.Series()
2072 for builds in data.values:
2073 for item in builds.values:
2074 for item_id, item_data in item.items():
2075 merged_data[item_id] = item_data
# Pretty-prints per-thread VPP runtime statistics ("show runtime") for
# every test that carries "show-run" data: for each DUT and thread it
# prints a table of per-node calls/vectors/suspends, clocks-per-packet
# and average vector size, plus a per-thread average vector size.
# NOTE(review): gapped listing — original lines 2080-2081, 2085, 2087,
# 2090, 2092, 2095, 2105, 2109-2111, 2114-2117, 2122, 2126-2129,
# 2134-2138, 2141-2144, 2146, 2148-2149 are not visible, including the
# `continue` statements after the guard checks, the vectors_call else
# branches, the row assembly, the PrettyTable field-name list and the
# `avg` accumulation; confirm details against the full source.
2078 def print_all_oper_data(self):
2079 """Print all operational data to console.
2082 for job in self._input_data.values:
2083 for build in job.values:
2084 for test_id, test_data in build[u"tests"].items():
# Skip tests without show-run data; skip DUT entries without runtime.
2086 if test_data.get(u"show-run", None) is None:
2088 for dut_name, data in test_data[u"show-run"].items():
2089 if data.get(u"runtime", None) is None:
# "runtime" is stored as a JSON string; parse it here.
2091 runtime = loads(data[u"runtime"])
2093 threads_nr = len(runtime[0][u"clocks"])
2094 except (IndexError, KeyError):
2096 threads = OrderedDict(
2097 {idx: list() for idx in range(threads_nr)})
2098 for item in runtime:
2099 for idx in range(threads_nr):
# Clocks-per-unit: prefer vectors, then calls, then suspends as divisor.
2100 if item[u"vectors"][idx] > 0:
2101 clocks = item[u"clocks"][idx] / \
2102 item[u"vectors"][idx]
2103 elif item[u"calls"][idx] > 0:
2104 clocks = item[u"clocks"][idx] / \
2106 elif item[u"suspends"][idx] > 0:
2107 clocks = item[u"clocks"][idx] / \
2108 item[u"suspends"][idx]
2112 if item[u"calls"][idx] > 0:
2113 vectors_call = item[u"vectors"][idx] / \
# Only nodes with any activity (calls+vectors+suspends non-zero) are kept.
2118 if int(item[u"calls"][idx]) + int(
2119 item[u"vectors"][idx]) + \
2120 int(item[u"suspends"][idx]):
2121 threads[idx].append([
2123 item[u"calls"][idx],
2124 item[u"vectors"][idx],
2125 item[u"suspends"][idx],
2130 print(f"Host IP: {data.get(u'host', '')}, "
2131 f"Socket: {data.get(u'socket', '')}")
2132 for thread_nr, thread in threads.items():
2133 txt_table = prettytable.PrettyTable(
2139 u"Cycles per Packet",
2140 u"Average Vector Size"
2145 txt_table.add_row(row)
2147 if len(thread) == 0:
2150 avg = f", Average Vector Size per Node: " \
2151 f"{(avg / len(thread)):.2f}"
# Thread 0 is VPP's main thread; others are workers.
2152 th_name = u"main" if thread_nr == 0 \
2153 else f"worker_{thread_nr}"
2154 print(f"{dut_name}, {th_name}{avg}")
2155 txt_table.float_format = u".2"
2156 txt_table.align = u"r"
2157 txt_table.align[u"Name"] = u"l"
2158 print(f"{txt_table.get_string()}\n")