# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
import re
import copy
import resource
import logging

from collections import OrderedDict
from os import remove, walk, listdir
from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from json.decoder import JSONDecodeError

import hdrh.histogram
import hdrh.codec
import prettytable
import pandas as pd

from robot.api import ExecutionResult, ResultVisitor
from robot import errors

from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
from pal_errors import PresentationError


# Separator used in file names

class ExecutionChecker(ResultVisitor):
    """Class to traverse through the test suite structure.

    The functionality implemented in this class generates a json structure:

    Performance tests:

    {
        "metadata": {
            "generated": "Timestamp",
            "version": "SUT version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            # NDRPDR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "NDRPDR",
                "status": "PASS" | "FAIL",
                ...
            },
            # TCP tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "TCP",
                "status": "PASS" | "FAIL",
                "result": int
            },
            # MRR, BMRR tests:
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "type": "MRR" | "BMRR",
                "status": "PASS" | "FAIL",
                "result": {
                    "receive-rate": float,
                    # Average of a list, computed using AvgStdevStats.
                    # In CSIT-1180, replace with List[float].
                }
            }
        }
    }

    Functional tests:

    {
        "metadata": {  # Optional
            "version": "VPP version",
            "job": "Jenkins job name",
            "build": "Information about the build"
        },
        "suites": {
            "Suite long name 1": {
                "doc": "Suite 1 documentation",
                "parent": "Suite 1 parent",
                "level": "Level of the suite in the suite hierarchy"
            },
            "Suite long name N": {
                "doc": "Suite N documentation",
                "parent": "Suite N parent",
                "level": "Level of the suite in the suite hierarchy"
            }
        },
        "tests": {
            "ID": {
                "name": "Test name",
                "parent": "Name of the parent of the test",
                "doc": "Test documentation",
                "msg": "Test message",
                "tags": ["tag 1", "tag 2", "tag n"],
                "conf-history": "DUT1 and DUT2 VAT History",
                "show-run": "Show Run",
                "status": "PASS" | "FAIL"
            }
        }
    }

    .. note:: ID is the lowercase full path to the test.
    """

    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    )
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    )
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    )
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    )
    REGEX_CPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
    )
    REGEX_PPS_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
    )
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

    # Needed for CPS and PPS tests
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    )
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    )
    REGEX_VERSION_VPP = re.compile(
        r"(return STDOUT Version:\s*|"
        r"VPP Version:\s*|VPP version:\s*)(.*)"
    )
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    )
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    )
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    )
    REGEX_BMRR = re.compile(
        r'.*trial results.*: \[(.*)\]'
    )
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    )
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    )
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')

    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')

    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')

    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')

    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')

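    # Illustrative examples (not from the source) of the test message lines
    # the regexes above are written against. A line matched by REGEX_MRR:
    #   MaxReceivedRate_Results [pkts/10sec]: tx 12345678, rx 12345600
    # and a (single) line matched by REGEX_BMRR:
    #   Maximum Receive Rate trial results ...: [10000000.0, 10100000.0]
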
    def __init__(self, metadata, mapping, ignore):
        """Initialisation.

        :param metadata: Key-value pairs to be included in "metadata" part of
            JSON structure.
        :param mapping: Mapping of the old names of test cases to the new
            (actual) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        # SUT version
        self._version = None

        # Timestamp
        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        self._testbed = None

        # Mapping of TCs long names
        self._mapping = mapping

        # Ignore list
        self._ignore = ignore

        # Number of PAPI History messages found:
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        self._sh_run_counter = 0

        # Test ID of currently processed test - the lowercase full path to the
        # test.
        self._test_id = None

        # The main data structure
        self._data = {
            u"metadata": OrderedDict(),
            u"suites": OrderedDict(),
            u"tests": OrderedDict()
        }

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages
        self.parse_msg = {
            u"timestamp": self._get_timestamp,
            u"vpp-version": self._get_vpp_version,
            u"dpdk-version": self._get_dpdk_version,
            # TODO: Remove when not needed:
            u"teardown-vat-history": self._get_vat_history,
            u"teardown-papi-history": self._get_papi_history,
            u"test-show-runtime": self._get_show_run,
            u"testbed": self._get_testbed
        }

358 """Getter - Data parsed from the XML file.
360 :returns: Data parsed from the XML file.
    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or original message if a problem occurs.
        :rtype: str
        """
        groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
        if not groups or groups.lastindex != 1:
            return u"Test Failed."

        try:
            data = groups.group(1).split(u", ")
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        out_str = u"["
        try:
            for item in data:
                out_str += f"{(float(item) / 1e6):.2f}, "
            return out_str[:-2] + u"]"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_cps_test_msg(self, msg):
        """Get info from message of NDRPDR CPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """
        groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 2:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
                f"2. {(float(groups.group(2)) / 1e6):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_pps_test_msg(self, msg):
        """Get info from message of NDRPDR PPS tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """
        groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
        if not groups or groups.lastindex != 4:
            return u"Test Failed."

        try:
            return (
                f"1. {(float(groups.group(1)) / 1e6):5.2f} "
                f"{float(groups.group(2)):5.2f}\n"
                f"2. {(float(groups.group(3)) / 1e6):5.2f} "
                f"{float(groups.group(4)):5.2f}"
            )
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or "Test Failed." if a problem occurs.
        :rtype: str
        """
        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."

        try:
            data = {
                u"ndr_low": float(groups.group(1)),
                u"ndr_low_b": float(groups.group(2)),
                u"pdr_low": float(groups.group(3)),
                u"pdr_low_b": float(groups.group(4)),
                u"pdr_lat_90_1": groups.group(5),
                u"pdr_lat_90_2": groups.group(6),
                u"pdr_lat_50_1": groups.group(7),
                u"pdr_lat_50_2": groups.group(8),
                u"pdr_lat_10_1": groups.group(9),
                u"pdr_lat_10_2": groups.group(10),
            }
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        def _process_lat(in_str_1, in_str_2):
            """Extract min, avg, max values from latency string.

            :param in_str_1: Latency string for one direction produced by
                robot framework.
            :param in_str_2: Latency string for second direction produced by
                robot framework.
            :type in_str_1: str
            :type in_str_2: str
            :returns: Processed latency data or None if a problem occurs.
            :rtype: tuple
            """
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)

            if len(in_list_1) != 4 and len(in_list_2) != 4:
                return None

            in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
            try:
                hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
                return None

            in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
            try:
                hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
                return None

            if hdr_lat_1 and hdr_lat_2:
                hdr_lat = (
                    hdr_lat_1.get_value_at_percentile(50.0),
                    hdr_lat_1.get_value_at_percentile(90.0),
                    hdr_lat_1.get_value_at_percentile(99.0),
                    hdr_lat_2.get_value_at_percentile(50.0),
                    hdr_lat_2.get_value_at_percentile(90.0),
                    hdr_lat_2.get_value_at_percentile(99.0)
                )
                if all(hdr_lat):
                    return hdr_lat

            return None

        try:
            out_msg = (
                f"1. {(data[u'ndr_low'] / 1e6):5.2f} "
                f"{data[u'ndr_low_b']:5.2f}"
                f"\n2. {(data[u'pdr_low'] / 1e6):5.2f} "
                f"{data[u'pdr_low_b']:5.2f}"
            )
            latency = (
                _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
                _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
                _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
            )
            if all(latency):
                max_len = len(str(max((max(item) for item in latency))))
                max_len = 4 if max_len < 4 else max_len

                for idx, lat in enumerate(latency):
                    out_msg += (
                        f"\n{idx + 3}. "
                        f"{lat[0]:{max_len}d} "
                        f"{lat[1]:{max_len}d} "
                        f"{lat[2]:{max_len}d} "
                        f"{lat[3]:{max_len}d} "
                        f"{lat[4]:{max_len}d} "
                        f"{lat[5]:{max_len}d} "
                    )

            return out_msg
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            try:
                self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
                pass
            finally:
                self._data[u"metadata"][u"testbed"] = self._testbed
                self._msg_type = None

    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"return STDOUT Version:") or \
                msg.message.count(u"VPP Version:") or \
                msg.message.count(u"VPP version:"):
            self._version = str(
                re.search(self.REGEX_VERSION_VPP, msg.message).group(2)
            )
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None

    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"DPDK Version:"):
            try:
                self._version = str(re.search(
                    self.REGEX_VERSION_DPDK, msg.message).group(2))
                self._data[u"metadata"][u"version"] = self._version
            except IndexError:
                pass
            finally:
                self._msg_type = None

    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        self._timestamp = msg.timestamp[:14]
        self._data[u"metadata"][u"generated"] = self._timestamp
        self._msg_type = None

    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        TODO: Remove when not needed.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(
                r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} VAT command history:",
                u"", msg.message, count=1
            ).replace(u'\n', u' |br| ').replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            else:
                self._msg_type = None
            text = re.sub(
                r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} PAPI command history:",
                u"", msg.message, count=1
            ).replace(u'\n', u' |br| ').replace(u'"', u"'")
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
            )

    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if not msg.message.count(u"stats runtime"):
            return
        if self._sh_run_counter > 1:
            return

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()

        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
        try:
            host = groups.group(1)
        except (AttributeError, IndexError):
            host = u""
        try:
            sock = groups.group(2)
        except (AttributeError, IndexError):
            sock = u""

        runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
                        replace(u"'", u'"').replace(u'b"', u'"').
                        replace(u'u"', u'"').split(u":", 1)[1])
        try:
            threads_nr = len(runtime[0][u"clocks"])
        except (IndexError, KeyError):
            return

        dut = u"DUT{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
        oper = {
            u"host": host,
            u"socket": sock,
            u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
        }

        for item in runtime:
            for idx in range(threads_nr):
                if item[u"vectors"][idx] > 0:
                    clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
                elif item[u"calls"][idx] > 0:
                    clocks = item[u"clocks"][idx] / item[u"calls"][idx]
                elif item[u"suspends"][idx] > 0:
                    clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
                else:
                    clocks = 0.0

                if item[u"calls"][idx] > 0:
                    vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
                else:
                    vectors_call = 0.0

                if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
                        int(item[u"suspends"][idx]):
                    oper[u"threads"][idx].append([
                        item[u"name"],
                        item[u"calls"][idx],
                        item[u"vectors"][idx],
                        item[u"suspends"][idx],
                        clocks,
                        vectors_call
                    ])

        self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)

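    # Resulting structure (numbers made up for illustration): each DUT entry
    # under "show-run" holds per-thread rows of
    #   [node name, calls, vectors, suspends, cycles per vector, vectors/call]
    # e.g. oper[u"threads"][0] == [[u"ip4-input", 1024, 262144, 0, 23.5, 256.0]]
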
    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        throughput = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
                throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
                throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
                throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

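    # Illustrative message fragment (values made up) accepted by
    # REGEX_NDRPDR_RATE, with one arbitrary line between LOWER and UPPER:
    #   NDR_LOWER: 12345678.5 pps
    #   ...
    #   NDR_UPPER: 12356789.5 pps
    #   PDR_LOWER: 13345678.5 pps
    #   ...
    #   PDR_UPPER: 13356789.5 pps
    # which _get_ndrpdr_throughput() parses into
    # {u"NDR": {u"LOWER": 12345678.5, u"UPPER": 12356789.5}, u"PDR": {...}}
    # with status u"PASS".
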
    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
        test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        gbps = {
            u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
            u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)

        if groups is not None:
            try:
                gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
                gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
                gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
                gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return gbps, status

    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        throughput = {
            u"LOWER": -1.0,
            u"UPPER": -1.0
        }
        status = u"FAIL"
        groups = re.search(self.REGEX_PLR_RATE, msg)

        if groups is not None:
            try:
                throughput[u"LOWER"] = float(groups.group(1))
                throughput[u"UPPER"] = float(groups.group(2))
                status = u"PASS"
            except (IndexError, ValueError):
                pass

        return throughput, status

    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        latency_default = {
            u"min": -1.0, u"avg": -1.0, u"max": -1.0, u"hdrh": u""
        }
        latency = {
            u"NDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"LAT0": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR10": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR50": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            },
            u"PDR90": {
                u"direction1": copy.copy(latency_default),
                u"direction2": copy.copy(latency_default)
            }
        }
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
        if groups is None:
            groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
        if groups is None:
            return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with corresponding keys, except hdrh float values.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
            """
            in_list = in_str.split('/', 3)

            rval = {
                u"min": float(in_list[0]),
                u"avg": float(in_list[1]),
                u"max": float(in_list[2]),
                u"hdrh": u""
            }

            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

            return rval

897 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
898 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
899 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
900 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
901 if groups.lastindex == 4:
902 return latency, u"PASS"
903 except (IndexError, ValueError):
907 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
908 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
909 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
910 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
911 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
912 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
913 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
914 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
915 if groups.lastindex == 12:
916 return latency, u"PASS"
917 except (IndexError, ValueError):
920 # TODO: Remove when not needed
921 latency[u"NDR10"] = {
922 u"direction1": copy.copy(latency_default),
923 u"direction2": copy.copy(latency_default)
925 latency[u"NDR50"] = {
926 u"direction1": copy.copy(latency_default),
927 u"direction2": copy.copy(latency_default)
929 latency[u"NDR90"] = {
930 u"direction1": copy.copy(latency_default),
931 u"direction2": copy.copy(latency_default)
934 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
935 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
936 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
937 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
938 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
939 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
940 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
941 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
942 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
943 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
944 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
945 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
946 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
947 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
948 return latency, u"PASS"
949 except (IndexError, ValueError):
952 return latency, u"FAIL"
    @staticmethod
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        result = dict()
        status = u"FAIL"

        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
            try:
                result = loads(msg)
                status = u"PASS"
            except JSONDecodeError:
                pass
        elif u"VPPECHO" in tags:
            try:
                msg_lst = msg.replace(u"}{", u"} {").split(u" ")
                result = dict(
                    client=loads(msg_lst[0]),
                    server=loads(msg_lst[1])
                )
                status = u"PASS"
            except (JSONDecodeError, IndexError):
                pass

        return result, status

    def visit_suite(self, suite):
        """Implements traversing through the suite and its direct children.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        if self.start_suite(suite) is not False:
            suite.suites.visit(self)
            suite.tests.visit(self)
            self.end_suite(suite)

    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        try:
            parent_name = suite.parent.name
        except AttributeError:
            return

        doc_str = suite.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'*[', u' |br| *[').\
            replace(u"*", u"**").\
            replace(u' |br| *[', u'*[', 1)

        self._data[u"suites"][suite.longname.lower().
                              replace(u'"', u"'").
                              replace(u" ", u"_")] = {
            u"name": suite.name.lower(),
            u"doc": doc_str,
            u"parent": parent_name,
            u"level": len(suite.longname.split(u"."))
        }

        suite.keywords.visit(self)

    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """

    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            self.end_test(test)

    def start_test(self, test):
        """Called when test starts.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        self._sh_run_counter = 0

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:
            return

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            logging.debug(
                f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
                f"{name}"
            )
        else:
            longname = longname_orig
            name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(
            self.REGEX_TC_NUMBER, u"", longname.replace(u"snat", u"nat")
        )
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(
            self.REGEX_TC_NUMBER, "", name.replace(u"snat", u"nat")
        )

        test_result[u"parent"] = test.parent.name.lower().\
            replace(u"snat", u"nat")
        test_result[u"tags"] = tags
        test_result[u"doc"] = test.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'[', u' |br| [').\
            replace(u' |br| [', u'[', 1)
        test_result[u"type"] = u"FUNC"
        test_result[u"status"] = test.status

        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
                    test_result[u"msg"] = self._get_data_from_pps_test_msg(
                        test.message).replace(u'\n', u' |br| ').\
                        replace(u'\r', u'').replace(u'"', u"'")
                elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"msg"] = self._get_data_from_cps_test_msg(
                        test.message).replace(u'\n', u' |br| ').\
                        replace(u'\r', u'').replace(u'"', u"'")
                else:
                    test_result[u"msg"] = self._get_data_from_perf_test_msg(
                        test.message).replace(u'\n', u' |br| ').\
                        replace(u'\r', u'').replace(u'"', u"'")
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
                    test.message).replace(u'\n', u' |br| ').\
                    replace(u'\r', u'').replace(u'"', u"'")
            else:
                test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
                    replace(u'\r', u'').replace(u'"', u"'")
        else:
            test_result[u"msg"] = u"Test Failed."

1127 if u"PERFTEST" in tags:
1128 # Replace info about cores (e.g. -1c-) with the info about threads
1129 # and cores (e.g. -1t1c-) in the long test case names and in the
1130 # test case names if necessary.
1131 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
1135 for tag in test_result[u"tags"]:
1136 groups = re.search(self.REGEX_TC_TAG, tag)
1142 self._test_id = re.sub(
1143 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1144 self._test_id, count=1
1146 test_result[u"name"] = re.sub(
1147 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1148 test_result["name"], count=1
1151 test_result[u"status"] = u"FAIL"
1152 self._data[u"tests"][self._test_id] = test_result
1154 f"The test {self._test_id} has no or more than one "
1155 f"multi-threading tags.\n"
1156 f"Tags: {test_result[u'tags']}"
        if test.status == u"PASS":
            if u"DEVICETEST" in tags:
                test_result[u"type"] = u"DEVICETEST"
            elif u"NDRPDR" in tags:
                if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
                    test_result[u"type"] = u"CPS"
                else:
                    test_result[u"type"] = u"NDRPDR"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"gbps"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput_gbps(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                if u"MRR" in tags:
                    test_result[u"type"] = u"MRR"
                else:
                    test_result[u"type"] = u"BMRR"

                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    items_float = [
                        float(item.strip().replace(u"'", u""))
                        for item in items_str.split(",")
                    ]
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"samples"] = items_float
                    test_result[u"result"][u"receive-rate"] = stats.avg
                    test_result[u"result"][u"receive-stdev"] = stats.stdev
                else:
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
            elif u"SOAK" in tags:
                test_result[u"type"] = u"SOAK"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
            elif u"HOSTSTACK" in tags:
                test_result[u"type"] = u"HOSTSTACK"
                test_result[u"result"], test_result[u"status"] = \
                    self._get_hoststack_data(test.message, tags)
            elif u"TCP" in tags:
                test_result[u"type"] = u"TCP"
                groups = re.search(self.REGEX_TCP, test.message)
                test_result[u"result"] = int(groups.group(2))
            elif u"RECONF" in tags:
                test_result[u"type"] = u"RECONF"
                test_result[u"result"] = None
                try:
                    grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                    grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                    test_result[u"result"] = {
                        u"loss": int(grps_loss.group(1)),
                        u"time": float(grps_time.group(1))
                    }
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
            else:
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                return

        self._data[u"tests"][self._test_id] = test_result

    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """

    def visit_keyword(self, keyword):
        """Implements traversing through the keyword and its child keywords.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        if self.start_keyword(keyword) is not False:
            self.end_keyword(keyword)

    def start_keyword(self, keyword):
        """Called when keyword starts. Dispatches to the setup, teardown or
        test keyword visitor.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        try:
            if keyword.type == u"setup":
                self.visit_setup_kw(keyword)
            elif keyword.type == u"teardown":
                self.visit_teardown_kw(keyword)
            else:
                self.visit_test_kw(keyword)
        except AttributeError:
            pass

    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """

    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)

    def start_test_kw(self, test_kw):
        """Called when test keyword starts.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        if test_kw.name.count(u"Show Runtime On All Duts") or \
                test_kw.name.count(u"Show Runtime Counters On All Duts") or \
                test_kw.name.count(u"Vpp Show Runtime On All Duts"):
            self._msg_type = u"test-show-runtime"
            self._sh_run_counter += 1
        else:
            return
        test_kw.messages.visit(self)

    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """

    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)

    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
                not self._version:
            self._msg_type = u"dpdk-version"
        elif setup_kw.name.count(u"Set Global Variable") \
                and not self._timestamp:
            self._msg_type = u"timestamp"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
        else:
            return
        setup_kw.messages.visit(self)

    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """

    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)

    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        if teardown_kw.name.count(u"Show Vat History On All Duts"):
            # TODO: Remove when not needed:
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)

    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """

    def visit_message(self, msg):
        """Implements visiting the message.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self.start_message(msg) is not False:
            self.end_message(msg)

    def start_message(self, msg):
        """Called when message starts. Get required information from messages:
        version, timestamp, VAT/PAPI history, show runtime, testbed.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if self._msg_type:
            self.parse_msg[self._msg_type](msg)

    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """

class InputData:
    """Input data.

    The data is extracted from output.xml files generated by Jenkins jobs and
    stored in pandas' Series.

    The data structure:
    - job name
      - build number
        - metadata
          (as described in ExecutionChecker documentation)
        - suites
          (as described in ExecutionChecker documentation)
        - tests
          (as described in ExecutionChecker documentation)
    """

    def __init__(self, spec):
        """Initialisation.

        :param spec: Specification.
        :type spec: Specification
        """
        # Specification:
        self._cfg = spec

        # Data store:
        self._input_data = pd.Series()

    @property
    def data(self):
        """Getter - Input data.

        :returns: Input data.
        :rtype: pandas.Series
        """
        return self._input_data

    def metadata(self, job, build):
        """Getter - metadata.

        :param job: Job which metadata we want.
        :param build: Build which metadata we want.
        :type job: str
        :type build: str
        :returns: Metadata.
        :rtype: pandas.Series
        """
        return self.data[job][build][u"metadata"]

    def suites(self, job, build):
        """Getter - suites.

        :param job: Job which suites we want.
        :param build: Build which suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """
        return self.data[job][str(build)][u"suites"]

    def tests(self, job, build):
        """Getter - tests.

        :param job: Job which tests we want.
        :param build: Build which tests we want.
        :type job: str
        :type build: str
        :returns: Tests.
        :rtype: pandas.Series
        """
        return self.data[job][build][u"tests"]

    def _parse_tests(self, job, build):
        """Process data from robot output.xml file and return JSON structured
        data.

        :param job: The name of job which build output data will be processed.
        :param build: The build which output data will be processed.
        :type job: str
        :type build: dict
        :returns: JSON data structure.
        :rtype: dict
        """
        metadata = {
            u"job": job,
            u"build": build
        }

        with open(build[u"file-name"], u'r') as data_file:
            try:
                result = ExecutionResult(data_file)
            except errors.DataError as err:
                logging.error(
                    f"Error occurred while parsing output.xml: {repr(err)}"
                )
                return None

        checker = ExecutionChecker(
            metadata, self._cfg.mapping, self._cfg.ignore
        )
        result.visit(checker)

        return checker.data

    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        :returns: Parsed data together with the state of processing.
        :rtype: dict
        """
        logging.info(f"Processing the job/build: {job}: {build[u'build']}")

        state = u"failed"
        success = False
        data = None
        do_repeat = repeat
        while do_repeat:
            success = download_and_unzip_data_file(self._cfg, job, build, pid)
            if success:
                break
            do_repeat -= 1
        if not success:
            logging.error(
                f"It is not possible to download the input data file from the "
                f"job {job}, build {build[u'build']}, or it is damaged. "
                f"Skipped."
            )
        if success:
            logging.info(f"Processing data from build {build[u'build']}")
            data = self._parse_tests(job, build)
            if data is None:
                logging.error(
                    f"Input data file from the job {job}, build "
                    f"{build[u'build']} is damaged. Skipped."
                )
            else:
                state = u"processed"

            try:
                remove(build[u"file-name"])
            except OSError as err:
                logging.error(
                    f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
                )

        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.environment.get(u"time-period", None)
        if timeperiod and data:
            now = dt.utcnow()
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get(u"metadata", None)
            if metadata:
                generated = metadata.get(u"generated", None)
                if generated:
                    generated = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - generated) > timeperiod:
                        # Remove the data and the file:
                        state = u"removed"
                        data = None
                        logging.info(
                            f"The build {job}/{build[u'build']} is "
                            f"outdated, will be removed."
                        )

        return {
            u"data": data,
            u"state": state,
            u"job": job,
            u"build": build
        }

    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """
        logging.info(u"Downloading and parsing input files ...")

        for job, builds in self._cfg.input.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result[u"build"][u"build"]

                if result[u"data"]:
                    data = result[u"data"]
                    build_data = pd.Series({
                        u"metadata": pd.Series(
                            list(data[u"metadata"].values()),
                            index=list(data[u"metadata"].keys())
                        ),
                        u"suites": pd.Series(
                            list(data[u"suites"].values()),
                            index=list(data[u"suites"].keys())
                        ),
                        u"tests": pd.Series(
                            list(data[u"tests"].values()),
                            index=list(data[u"tests"].keys())
                        )
                    })

                    if self._input_data.get(job, None) is None:
                        self._input_data[job] = pd.Series()
                    self._input_data[job][str(build_nr)] = build_data
                    self._cfg.set_input_file_name(
                        job, build_nr, result[u"build"][u"file-name"]
                    )
                self._cfg.set_input_state(job, build_nr, result[u"state"])

                mem_alloc = \
                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
                logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

        logging.info(u"Done.")

        msg = u"Successful downloads from the sources:\n"
        for source in self._cfg.environment[u"data-sources"]:
            if source[u"successful-downloads"]:
                msg += (
                    f"{source[u'url']}/{source[u'path']}/"
                    f"{source[u'file-name']}: "
                    f"{source[u'successful-downloads']}\n"
                )
        logging.info(msg)

    def process_local_file(self, local_file, job=u"local", build_nr=1,
                           replace=True):
        """Process local XML file given as a command-line parameter.

        :param local_file: The file to process.
        :param job: Job name.
        :param build_nr: Build number.
        :param replace: If True, the information about jobs and builds is
            replaced by the new one, otherwise the new jobs and builds are
            added.
        :type local_file: str
        :type job: str
        :type build_nr: int
        :type replace: bool
        :raises: PresentationError if an error occurs.
        """
        if not isfile(local_file):
            raise PresentationError(f"The file {local_file} does not exist.")

        try:
            build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
        except (IndexError, ValueError):
            pass

        build = {
            u"build": build_nr,
            u"status": u"failed",
            u"file-name": local_file
        }
        if replace:
            self._cfg.input = dict()
        self._cfg.add_build(job, build)

        logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
        data = self._parse_tests(job, build)
        if data is None:
            raise PresentationError(
                f"Error occurred while parsing the file {local_file}"
            )

        build_data = pd.Series({
            u"metadata": pd.Series(
                list(data[u"metadata"].values()),
                index=list(data[u"metadata"].keys())
            ),
            u"suites": pd.Series(
                list(data[u"suites"].values()),
                index=list(data[u"suites"].keys())
            ),
            u"tests": pd.Series(
                list(data[u"tests"].values()),
                index=list(data[u"tests"].keys())
            )
        })

        if self._input_data.get(job, None) is None:
            self._input_data[job] = pd.Series()
        self._input_data[job][str(build_nr)] = build_data

        self._cfg.set_input_state(job, build_nr, u"processed")

    def process_local_directory(self, local_dir, replace=True):
        """Process local directory with XML file(s). The directory is processed
        as a 'job' and the XML files in it as builds.
        If the given directory contains only sub-directories, these
        sub-directories are processed as jobs and the corresponding XML files
        as builds of their job.

        :param local_dir: Local directory to process.
        :param replace: If True, the information about jobs and builds is
            replaced by the new one, otherwise the new jobs and builds are
            added.
        :type local_dir: str
        :type replace: bool
        """
        if not isdir(local_dir):
            raise PresentationError(
                f"The directory {local_dir} does not exist."
            )

        # Check if the given directory includes only files, or only directories
        _, dirnames, filenames = next(walk(local_dir))

        if filenames and not dirnames:
            filenames.sort()
            # local_builds:
            # key: dir (job) name, value: list of file names (builds)
            local_builds = {
                local_dir: [join(local_dir, name) for name in filenames]
            }

        elif dirnames and not filenames:
            dirnames.sort()
            # local_builds:
            # key: dir (job) name, value: list of file names (builds)
            local_builds = dict()
            for dirname in dirnames:
                builds = [
                    join(local_dir, dirname, name)
                    for name in listdir(join(local_dir, dirname))
                    if isfile(join(local_dir, dirname, name))
                ]
                if builds:
                    local_builds[dirname] = sorted(builds)

        elif not filenames and not dirnames:
            raise PresentationError(f"The directory {local_dir} is empty.")
        else:
            raise PresentationError(
                f"The directory {local_dir} can include only files or only "
                f"directories, not both.\nThe directory {local_dir} includes "
                f"file(s):\n{filenames}\nand directories:\n{dirnames}"
            )

        if replace:
            self._cfg.input = dict()

        for job, files in local_builds.items():
            for idx, local_file in enumerate(files):
                self.process_local_file(local_file, job, idx + 1, replace=False)

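    # Illustrative use (assuming a prepared Specification object `spec` and a
    # local directory of output.xml files; both names are hypothetical):
    #   in_data = InputData(spec)
    #   in_data.process_local_directory(u"./builds", replace=True)
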
    @staticmethod
    def _end_of_tag(tag_filter, start=0, closer=u"'"):
        """Return the index of character in the string which is the end of tag.

        :param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the searching is started.
        :param closer: The character which is the tag closer.
        :type tag_filter: str
        :type start: int
        :type closer: str
        :returns: The index of the tag closer.
        :rtype: int
        """
        try:
            idx_opener = tag_filter.index(closer, start)
            return tag_filter.index(closer, idx_opener + 1)
        except ValueError:
            return None

    @staticmethod
    def _condition(tag_filter):
        """Create a conditional statement from the given tag filter.

        :param tag_filter: Filter based on tags from the element specification.
        :type tag_filter: str
        :returns: Conditional statement which can be evaluated.
        :rtype: str
        """
        index = 0
        while True:
            index = InputData._end_of_tag(tag_filter, index)
            if index is None:
                return tag_filter
            index += 1
            tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]

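    # Example (illustrative): the tag filter u"'64B' and '1T1C'" is turned by
    # _condition() into the evaluable expression
    # u"'64B' in tags and '1T1C' in tags".
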
    def filter_data(self, element, params=None, data=None, data_set=u"tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:
        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """
        try:
            if data_set == "suites":
                cond = u"True"
            elif element[u"filter"] in (u"all", u"template"):
                cond = u"True"
            else:
                cond = InputData._condition(element[u"filter"])
            logging.debug(f"Filter: {cond}")
        except KeyError:
            logging.error(u"No filter defined.")
            return None

        if params is None:
            params = element.get(u"parameters", None)
            if params:
                params.append(u"type")

        data_to_filter = data if data else element[u"data"]
        data = pd.Series()
        try:
            for job, builds in data_to_filter.items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    try:
                        data_dict = dict(
                            self.data[job][str(build)][data_set].items())
                    except KeyError:
                        if continue_on_error:
                            continue
                        return None

                    for test_id, test_data in data_dict.items():
                        if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                            data[job][str(build)][test_id] = pd.Series()
                            if params is None:
                                for param, val in test_data.items():
                                    data[job][str(build)][test_id][param] = val
                            else:
                                for param in params:
                                    try:
                                        data[job][str(build)][test_id][param] =\
                                            test_data[param]
                                    except KeyError:
                                        data[job][str(build)][test_id][param] =\
                                            u"No Data"
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element specification: "
                f"{repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None
        except SyntaxError as err:
            logging.error(
                f"The filter {cond} is not correct. Check if all tags are "
                f"enclosed by apostrophes.\n{repr(err)}"
            )
            return None

    def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:
        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
          - build n
        ...
        - job n

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The item will be empty then.
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """
        include = element.get(u"include", None)
        if not include:
            logging.warning(u"No tests to include, skipping the element.")
            return None

        if params is None:
            params = element.get(u"parameters", None)
            if params and u"type" not in params:
                params.append(u"type")

        cores = element.get(u"core", None)
        if cores:
            tests = list()
            for core in cores:
                for test in include:
                    tests.append(test.format(core=core))
        else:
            tests = include

        data = pd.Series()
        try:
            for job, builds in element[u"data"].items():
                data[job] = pd.Series()
                for build in builds:
                    data[job][str(build)] = pd.Series()
                    for test in tests:
                        try:
                            reg_ex = re.compile(str(test).lower())
                            for test_id in self.data[job][
                                    str(build)][data_set].keys():
                                if re.match(reg_ex, str(test_id).lower()):
                                    test_data = self.data[job][
                                        str(build)][data_set][test_id]
                                    data[job][str(build)][test_id] = pd.Series()
                                    if params is None:
                                        for param, val in test_data.items():
                                            data[job][str(build)][test_id]\
                                                [param] = val
                                    else:
                                        for param in params:
                                            try:
                                                data[job][str(build)][
                                                    test_id][param] = \
                                                    test_data[param]
                                            except KeyError:
                                                data[job][str(build)][
                                                    test_id][param] = u"No Data"
                        except KeyError as err:
                            if continue_on_error:
                                logging.debug(repr(err))
                                continue
                            logging.error(repr(err))
                            return None
            return data

        except (KeyError, IndexError, ValueError) as err:
            logging.error(
                f"Missing mandatory parameter in the element "
                f"specification: {repr(err)}"
            )
            return None
        except AttributeError as err:
            logging.error(repr(err))
            return None

    @staticmethod
    def merge_data(data):
        """Merge data from more jobs and builds to a simple data structure.

        The output data structure is:
        - test (suite) 1 ID:
          - param 1
          - param 2
          ...
          - param n
        ...
        - test (suite) n ID:
        ...

        :param data: Data to merge.
        :type data: pandas.Series
        :returns: Merged data.
        :rtype: pandas.Series
        """
        logging.info(u"Merging data ...")

        merged_data = pd.Series()
        for builds in data.values:
            for item in builds.values:
                for item_id, item_data in item.items():
                    merged_data[item_id] = item_data
        return merged_data

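    # Illustrative use: collapse already filtered per-job/per-build data into
    # one Series keyed by test ID (`filtered` is hypothetical):
    #   merged = InputData.merge_data(filtered)
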
    def print_all_oper_data(self):
        """Print all operational data to console.
        """
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for job in self._input_data.values:
            for build in job.values:
                for test_id, test_data in build[u"tests"].items():
                    print(f"{test_id}")
                    if test_data.get(u"show-run", None) is None:
                        continue
                    for dut_name, data in test_data[u"show-run"].items():
                        if data.get(u"threads", None) is None:
                            continue
                        print(f"Host IP: {data.get(u'host', '')}, "
                              f"Socket: {data.get(u'socket', '')}")
                        for thread_nr, thread in data[u"threads"].items():
                            txt_table = prettytable.PrettyTable(tbl_hdr)
                            avg = 0.0
                            for row in thread:
                                txt_table.add_row(row)
                                avg += row[-1]
                            if len(thread) == 0:
                                avg = u""
                            else:
                                avg = f", Average Vector Size per Node: " \
                                    f"{(avg / len(thread)):.2f}"
                            th_name = u"main" if thread_nr == 0 \
                                else f"worker_{thread_nr}"
                            print(f"{dut_name}, {th_name}{avg}")
                            txt_table.float_format = u".2"
                            txt_table.align = u"r"
                            txt_table.align[u"Name"] = u"l"
                            print(f"{txt_table.get_string()}\n")