1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
48 # Separator used in file names
52 class ExecutionChecker(ResultVisitor):
53 """Class to traverse through the test suite structure.
55 The functionality implemented in this class generates a json structure:
61 "generated": "Timestamp",
62 "version": "SUT version",
63 "job": "Jenkins job name",
64 "build": "Information about the build"
67 "Suite long name 1": {
69 "doc": "Suite 1 documentation",
70 "parent": "Suite 1 parent",
71 "level": "Level of the suite in the suite hierarchy"
73 "Suite long name N": {
75 "doc": "Suite N documentation",
76 "parent": "Suite 2 parent",
77 "level": "Level of the suite in the suite hierarchy"
84 "parent": "Name of the parent of the test",
85 "doc": "Test documentation",
86 "msg": "Test message",
87 "conf-history": "DUT1 and DUT2 VAT History",
88 "show-run": "Show Run",
89 "tags": ["tag 1", "tag 2", "tag n"],
91 "status": "PASS" | "FAIL",
137 "parent": "Name of the parent of the test",
138 "doc": "Test documentation",
139 "msg": "Test message",
140 "tags": ["tag 1", "tag 2", "tag n"],
142 "status": "PASS" | "FAIL",
149 "parent": "Name of the parent of the test",
150 "doc": "Test documentation",
151 "msg": "Test message",
152 "tags": ["tag 1", "tag 2", "tag n"],
153 "type": "MRR" | "BMRR",
154 "status": "PASS" | "FAIL",
156 "receive-rate": float,
157 # Average of a list, computed using AvgStdevStats.
158 # In CSIT-1180, replace with List[float].
172 "metadata": { # Optional
173 "version": "VPP version",
174 "job": "Jenkins job name",
175 "build": "Information about the build"
179 "doc": "Suite 1 documentation",
180 "parent": "Suite 1 parent",
181 "level": "Level of the suite in the suite hierarchy"
184 "doc": "Suite N documentation",
185 "parent": "Suite 2 parent",
186 "level": "Level of the suite in the suite hierarchy"
192 "parent": "Name of the parent of the test",
193 "doc": "Test documentation"
194 "msg": "Test message"
195 "tags": ["tag 1", "tag 2", "tag n"],
196 "conf-history": "DUT1 and DUT2 VAT History"
197 "show-run": "Show Run"
198 "status": "PASS" | "FAIL"
206 .. note:: ID is the lowercase full path to the test.
209 REGEX_PLR_RATE = re.compile(
210 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
211 r'PLRsearch upper bound::?\s(\d+.\d+)'
213 REGEX_NDRPDR_RATE = re.compile(
214 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
215 r'NDR_UPPER:\s(\d+.\d+).*\n'
216 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
217 r'PDR_UPPER:\s(\d+.\d+)'
219 REGEX_NDRPDR_GBPS = re.compile(
220 r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
221 r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
222 r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
223 r'PDR_UPPER:.*,\s(\d+.\d+)'
225 REGEX_PERF_MSG_INFO = re.compile(
226 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
227 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
228 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
229 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
230 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
232 REGEX_CPS_MSG_INFO = re.compile(
233 r'NDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*\n'
234 r'PDR_LOWER:\s(\d+.\d+)\s.*\s.*\n.*\n.*'
236 REGEX_PPS_MSG_INFO = re.compile(
237 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
238 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*'
240 REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
242 # Needed for CPS and PPS tests
243 REGEX_NDRPDR_LAT_BASE = re.compile(
244 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
245 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
247 REGEX_NDRPDR_LAT = re.compile(
248 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
249 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
250 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
251 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
252 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
253 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
256 REGEX_VERSION_VPP = re.compile(
257 r"(return STDOUT Version:\s*|"
258 r"VPP Version:\s*|VPP version:\s*)(.*)"
260 REGEX_VERSION_DPDK = re.compile(
261 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
263 REGEX_TCP = re.compile(
264 r'Total\s(rps|cps|throughput):\s(\d*).*$'
266 REGEX_MRR = re.compile(
267 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
268 r'tx\s(\d*),\srx\s(\d*)'
270 REGEX_BMRR = re.compile(
271 r'Maximum Receive Rate trial results .*: \[(.*)\]'
273 REGEX_RECONF_LOSS = re.compile(
274 r'Packets lost due to reconfig: (\d*)'
276 REGEX_RECONF_TIME = re.compile(
277 r'Implied time lost: (\d*.[\de-]*)'
279 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
281 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
283 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
285 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
287 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
# NOTE(review): gap-sampled extraction — the docstring close, several attribute
# initialisations (e.g. self._version, self._testbed, self._test_id, the
# self._data assignment header and the parse_msg dict header) are missing.
# Restore from the upstream file before use; comments below annotate only what
# is visible.
289 def __init__(self, metadata, mapping, ignore):
292 :param metadata: Key-value pairs to be included in "metadata" part of
294 :param mapping: Mapping of the old names of test cases to the new
296 :param ignore: List of TCs to be ignored.
302 # Type of message to parse out from the test messages
303 self._msg_type = None
309 self._timestamp = None
311 # Testbed. The testbed is identified by TG node IP address.
314 # Mapping of TCs long names
315 self._mapping = mapping
318 self._ignore = ignore
320 # Number of PAPI History messages found:
322 # 1 - PAPI History of DUT1
323 # 2 - PAPI History of DUT2
324 self._conf_history_lookup_nr = 0
326 self._sh_run_counter = 0
328 # Test ID of currently processed test- the lowercase full path to the
332 # The main data structure
334 u"metadata": OrderedDict(),
335 u"suites": OrderedDict(),
336 u"tests": OrderedDict()
339 # Save the provided metadata
340 for key, val in metadata.items():
341 self._data[u"metadata"][key] = val
343 # Dictionary defining the methods used to parse different types of
# Dispatch table: self._msg_type selects which _get_* extractor handles a
# robot Message (see visit_message/start_message in upstream).
346 u"timestamp": self._get_timestamp,
347 u"vpp-version": self._get_vpp_version,
348 u"dpdk-version": self._get_dpdk_version,
349 # TODO: Remove when not needed:
350 u"teardown-vat-history": self._get_vat_history,
351 u"teardown-papi-history": self._get_papi_history,
352 u"test-show-runtime": self._get_show_run,
353 u"testbed": self._get_testbed
358 """Getter - Data parsed from the XML file.
360 :returns: Data parsed from the XML file.
365 def _get_data_from_mrr_test_msg(self, msg):
366 """Get info from message of MRR performance tests.
368 :param msg: Message to be processed.
370 :returns: Processed message or original message if a problem occurs.
374 groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
375 if not groups or groups.lastindex != 1:
376 return u"Test Failed."
379 data = groups.group(1).split(u", ")
380 except (AttributeError, IndexError, ValueError, KeyError):
381 return u"Test Failed."
386 out_str += f"{(float(item) / 1e6):.2f}, "
387 return out_str[:-2] + u"]"
388 except (AttributeError, IndexError, ValueError, KeyError):
389 return u"Test Failed."
391 def _get_data_from_cps_test_msg(self, msg):
392 """Get info from message of NDRPDR CPS tests.
394 :param msg: Message to be processed.
396 :returns: Processed message or "Test Failed." if a problem occurs.
400 groups = re.search(self.REGEX_CPS_MSG_INFO, msg)
401 if not groups or groups.lastindex != 2:
402 return u"Test Failed."
406 f"1. {(float(groups.group(1)) / 1e6):5.2f}\n"
407 f"2. {(float(groups.group(2)) / 1e6):5.2f}"
409 except (AttributeError, IndexError, ValueError, KeyError):
410 return u"Test Failed."
412 def _get_data_from_pps_test_msg(self, msg):
413 """Get info from message of NDRPDR PPS tests.
415 :param msg: Message to be processed.
417 :returns: Processed message or "Test Failed." if a problem occurs.
421 groups = re.search(self.REGEX_PPS_MSG_INFO, msg)
422 if not groups or groups.lastindex != 4:
423 return u"Test Failed."
427 f"1. {(float(groups.group(1)) / 1e6):5.2f} "
428 f"{float(groups.group(2)):5.2f}\n"
429 f"2. {(float(groups.group(3)) / 1e6):5.2f} "
430 f"{float(groups.group(4)):5.2f}"
432 except (AttributeError, IndexError, ValueError, KeyError):
433 return u"Test Failed."
# NOTE(review): gap-sampled extraction — the try: headers, the data dict
# opener, the _process_lat return statements and the final out-message
# assembly are missing. The format specs ":68,202d" below appear to be a
# garbled ":{max_len}d" (max_len is computed at listing lines 522-523) —
# TODO confirm against upstream before trusting any output of this method.
435 def _get_data_from_perf_test_msg(self, msg):
436 """Get info from message of NDRPDR performance tests.
438 :param msg: Message to be processed.
440 :returns: Processed message or "Test Failed." if a problem occurs.
444 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
445 if not groups or groups.lastindex != 10:
446 return u"Test Failed."
450 u"ndr_low": float(groups.group(1)),
451 u"ndr_low_b": float(groups.group(2)),
452 u"pdr_low": float(groups.group(3)),
453 u"pdr_low_b": float(groups.group(4)),
454 u"pdr_lat_90_1": groups.group(5),
455 u"pdr_lat_90_2": groups.group(6),
456 u"pdr_lat_50_1": groups.group(7),
457 u"pdr_lat_50_2": groups.group(8),
458 u"pdr_lat_10_1": groups.group(9),
459 u"pdr_lat_10_2": groups.group(10),
461 except (AttributeError, IndexError, ValueError, KeyError):
462 return u"Test Failed."
# Nested helper: decodes the two per-direction "min/avg/max/hdrh" latency
# strings; the hdrh part is base64 ("=" padding fixed up before decode).
464 def _process_lat(in_str_1, in_str_2):
465 """Extract min, avg, max values from latency string.
467 :param in_str_1: Latency string for one direction produced by robot
469 :param in_str_2: Latency string for second direction produced by
473 :returns: Processed latency string or None if a problem occurs.
476 in_list_1 = in_str_1.split('/', 3)
477 in_list_2 = in_str_2.split('/', 3)
479 if len(in_list_1) != 4 and len(in_list_2) != 4:
482 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
484 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
485 except hdrh.codec.HdrLengthException:
488 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
490 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
491 except hdrh.codec.HdrLengthException:
494 if hdr_lat_1 and hdr_lat_2:
496 hdr_lat_1.get_value_at_percentile(50.0),
497 hdr_lat_1.get_value_at_percentile(90.0),
498 hdr_lat_1.get_value_at_percentile(99.0),
499 hdr_lat_2.get_value_at_percentile(50.0),
500 hdr_lat_2.get_value_at_percentile(90.0),
501 hdr_lat_2.get_value_at_percentile(99.0)
511 f"1. {(data[u'ndr_low'] / 1e6):5.2f} "
512 f"{data[u'ndr_low_b']:5.2f}"
513 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f} "
514 f"{data[u'pdr_low_b']:5.2f}"
517 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
518 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
519 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
522 max_len = len(str(max((max(item) for item in latency))))
523 max_len = 4 if max_len < 4 else max_len
525 for idx, lat in enumerate(latency):
530 f"{lat[0]:68,202d} "
531 f"{lat[1]:68,202d} "
532 f"{lat[2]:68,202d} "
533 f"{lat[3]:68,202d} "
534 f"{lat[4]:68,202d} "
535 f"{lat[5]:68,202d} "
540 except (AttributeError, IndexError, ValueError, KeyError):
541 return u"Test Failed."
# NOTE(review): gap-sampled extraction — the try:/except skeleton around the
# re.search is missing (line 557 is inside a try whose header was dropped).
543 def _get_testbed(self, msg):
544 """Called when extraction of testbed IP is required.
545 The testbed is identified by TG node IP address.
547 :param msg: Message to process.
552 if msg.message.count(u"Setup of TG node") or \
553 msg.message.count(u"Setup of node TG host"):
554 reg_tg_ip = re.compile(
555 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
# Extracts the first IPv4-looking token after "TG" and records it as the
# testbed identifier in metadata.
557 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
558 except (KeyError, ValueError, IndexError, AttributeError):
561 self._data[u"metadata"][u"testbed"] = self._testbed
562 self._msg_type = None
564 def _get_vpp_version(self, msg):
565 """Called when extraction of VPP version is required.
567 :param msg: Message to process.
572 if msg.message.count(u"return STDOUT Version:") or \
573 msg.message.count(u"VPP Version:") or \
574 msg.message.count(u"VPP version:"):
575 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
577 self._data[u"metadata"][u"version"] = self._version
578 self._msg_type = None
# NOTE(review): gap-sampled extraction — the try/except around the re.search
# (and its except clause) is missing; line 596 is likely in a finally: whose
# header was dropped. TODO confirm against upstream.
581 def _get_dpdk_version(self, msg):
583 :param msg: Message to process.
588 if msg.message.count(u"DPDK Version:"):
# REGEX_VERSION_DPDK group 2 is the version string.
590 self._version = str(re.search(
591 self.REGEX_VERSION_DPDK, msg.message).group(2))
592 self._data[u"metadata"][u"version"] = self._version
596 self._msg_type = None
598 def _get_timestamp(self, msg):
599 """Called when extraction of timestamp is required.
601 :param msg: Message to process.
606 self._timestamp = msg.timestamp[:14]
607 self._data[u"metadata"][u"generated"] = self._timestamp
608 self._msg_type = None
# NOTE(review): gap-sampled extraction — the replace()-chain at lines 625-627
# is cut mid-expression (the continuation after ".\" is missing), and the
# elif resetting self._msg_type is not visible.
610 def _get_vat_history(self, msg):
611 """Called when extraction of VAT command history is required.
613 TODO: Remove when not needed.
615 :param msg: Message to process.
619 if msg.message.count(u"VAT command history:"):
620 self._conf_history_lookup_nr += 1
621 if self._conf_history_lookup_nr == 1:
622 self._data[u"tests"][self._test_id][u"conf-history"] = str()
624 self._msg_type = None
# Strip the leading "<ip> VAT command history:" prefix and convert newlines
# to the " |br| " markup used by the report generator.
625 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
626 r"VAT command history:", u"",
627 msg.message, count=1).replace(u'\n', u' |br| ').\
630 self._data[u"tests"][self._test_id][u"conf-history"] += (
631 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
# NOTE(review): gap-sampled extraction — same structure as _get_vat_history;
# the replace()-chain continuation after line 649 is missing.
634 def _get_papi_history(self, msg):
635 """Called when extraction of PAPI command history is required.
637 :param msg: Message to process.
641 if msg.message.count(u"PAPI command history:"):
642 self._conf_history_lookup_nr += 1
643 if self._conf_history_lookup_nr == 1:
644 self._data[u"tests"][self._test_id][u"conf-history"] = str()
646 self._msg_type = None
# Strip the "<ip> PAPI command history:" prefix; newlines become " |br| ".
647 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
648 r"PAPI command history:", u"",
649 msg.message, count=1).replace(u'\n', u' |br| ').\
651 self._data[u"tests"][self._test_id][u"conf-history"] += (
652 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
# NOTE(review): gap-sampled extraction — early returns, try: headers, the
# oper dict opener, the loop over runtime items and parts of the appended
# row are missing. Per-node rows appear to be [name, calls, vectors,
# suspends, clocks, vectors/call] — TODO confirm against upstream.
655 def _get_show_run(self, msg):
656 """Called when extraction of VPP operational data (output of CLI command
657 Show Runtime) is required.
659 :param msg: Message to process.
664 if not msg.message.count(u"stats runtime"):
# Only the first "show runtime" per test is processed.
668 if self._sh_run_counter > 1:
671 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
672 self._data[u"tests"][self._test_id][u"show-run"] = dict()
674 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
678 host = groups.group(1)
679 except (AttributeError, IndexError):
682 sock = groups.group(2)
683 except (AttributeError, IndexError):
# The message payload after the first ":" is massaged into valid JSON
# (quotes normalised, b"/u" prefixes dropped) and parsed.
686 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
687 replace(u"'", u'"').replace(u'b"', u'"').
688 replace(u'u"', u'"').split(u":", 1)[1])
691 threads_nr = len(runtime[0][u"clocks"])
692 except (IndexError, KeyError):
695 dut = u"DUT{nr}".format(
696 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
701 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
705 for idx in range(threads_nr):
# Normalise clocks per unit of work: prefer vectors, then calls, then
# suspends as the divisor.
706 if item[u"vectors"][idx] > 0:
707 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
708 elif item[u"calls"][idx] > 0:
709 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
710 elif item[u"suspends"][idx] > 0:
711 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
715 if item[u"calls"][idx] > 0:
716 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
# Skip graph nodes that did no work at all on this thread.
720 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
721 int(item[u"suspends"][idx]):
722 oper[u"threads"][idx].append([
725 item[u"vectors"][idx],
726 item[u"suspends"][idx],
731 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
733 def _get_ndrpdr_throughput(self, msg):
734 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
737 :param msg: The test message to be parsed.
739 :returns: Parsed data as a dict and the status (PASS/FAIL).
740 :rtype: tuple(dict, str)
744 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
745 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
748 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
750 if groups is not None:
752 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
753 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
754 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
755 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
757 except (IndexError, ValueError):
760 return throughput, status
762 def _get_ndrpdr_throughput_gbps(self, msg):
763 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
766 :param msg: The test message to be parsed.
768 :returns: Parsed data as a dict and the status (PASS/FAIL).
769 :rtype: tuple(dict, str)
773 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
774 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
777 groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
779 if groups is not None:
781 gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
782 gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
783 gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
784 gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
786 except (IndexError, ValueError):
791 def _get_plr_throughput(self, msg):
792 """Get PLRsearch lower bound and PLRsearch upper bound from the test
795 :param msg: The test message to be parsed.
797 :returns: Parsed data as a dict and the status (PASS/FAIL).
798 :rtype: tuple(dict, str)
806 groups = re.search(self.REGEX_PLR_RATE, msg)
808 if groups is not None:
810 throughput[u"LOWER"] = float(groups.group(1))
811 throughput[u"UPPER"] = float(groups.group(2))
813 except (IndexError, ValueError):
816 return throughput, status
# NOTE(review): gap-sampled extraction — latency_default init, the latency
# dict opener with its "NDR"/"PDR"/"PDR10"/"PDR50"/"PDR90"/"LAT0" keys, the
# try: headers, and pass lines are missing. The group-number mapping below is
# order-sensitive; do not re-flow without the upstream file.
818 def _get_ndrpdr_latency(self, msg):
819 """Get LATENCY from the test message.
821 :param msg: The test message to be parsed.
823 :returns: Parsed data as a dict and the status (PASS/FAIL).
824 :rtype: tuple(dict, str)
834 u"direction1": copy.copy(latency_default),
835 u"direction2": copy.copy(latency_default)
838 u"direction1": copy.copy(latency_default),
839 u"direction2": copy.copy(latency_default)
842 u"direction1": copy.copy(latency_default),
843 u"direction2": copy.copy(latency_default)
846 u"direction1": copy.copy(latency_default),
847 u"direction2": copy.copy(latency_default)
850 u"direction1": copy.copy(latency_default),
851 u"direction2": copy.copy(latency_default)
854 u"direction1": copy.copy(latency_default),
855 u"direction2": copy.copy(latency_default)
# Prefer the full REGEX_NDRPDR_LAT; fall back to the base variant used by
# CPS/PPS tests; give up with FAIL if neither matches.
859 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
861 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
863 return latency, u"FAIL"
865 def process_latency(in_str):
866 """Return object with parsed latency values.
868 TODO: Define class for the return type.
870 :param in_str: Input string, min/avg/max/hdrh format.
872 :returns: Dict with corresponding keys, except hdrh float values.
874 :throws IndexError: If in_str does not have enough substrings.
875 :throws ValueError: If a substring does not convert to float.
877 in_list = in_str.split('/', 3)
880 u"min": float(in_list[0]),
881 u"avg": float(in_list[1]),
882 u"max": float(in_list[2]),
# The 4th component, when present, is the raw hdrh-encoded histogram.
886 if len(in_list) == 4:
887 rval[u"hdrh"] = str(in_list[3])
# Base layout: 4 groups = NDR/PDR only.
892 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
893 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
894 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
895 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
896 if groups.lastindex == 4:
897 return latency, u"PASS"
898 except (IndexError, ValueError):
# 12-group layout: adds PDR90/PDR50/PDR10 and LAT0.
902 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
903 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
904 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
905 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
906 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
907 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
908 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
909 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
910 if groups.lastindex == 12:
911 return latency, u"PASS"
912 except (IndexError, ValueError):
915 # TODO: Remove when not needed
# Legacy 18-group layout: also carries NDR10/NDR50/NDR90.
916 latency[u"NDR10"] = {
917 u"direction1": copy.copy(latency_default),
918 u"direction2": copy.copy(latency_default)
920 latency[u"NDR50"] = {
921 u"direction1": copy.copy(latency_default),
922 u"direction2": copy.copy(latency_default)
924 latency[u"NDR90"] = {
925 u"direction1": copy.copy(latency_default),
926 u"direction2": copy.copy(latency_default)
929 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
930 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
931 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
932 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
933 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
934 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
935 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
936 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
937 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
938 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
939 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
940 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
941 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
942 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
943 return latency, u"PASS"
944 except (IndexError, ValueError):
947 return latency, u"FAIL"
# NOTE(review): gap-sampled extraction — the @staticmethod decorator (implied
# by the missing self parameter), result/status initialisation, try: headers
# and the LDPRELOAD loads() call are missing. TODO confirm against upstream.
950 def _get_hoststack_data(msg, tags):
951 """Get data from the hoststack test message.
953 :param msg: The test message to be parsed.
954 :param tags: Test tags.
957 :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
958 :rtype: tuple(dict, str)
# Normalise quoting so the message body parses as JSON.
963 msg = msg.replace(u"'", u'"').replace(u" ", u"")
964 if u"LDPRELOAD" in tags:
968 except JSONDecodeError:
970 elif u"VPPECHO" in tags:
# VPPECHO emits two concatenated JSON objects: client then server.
972 msg_lst = msg.replace(u"}{", u"} {").split(u" ")
974 client=loads(msg_lst[0]),
975 server=loads(msg_lst[1])
978 except (JSONDecodeError, IndexError):
981 return result, status
983 def visit_suite(self, suite):
984 """Implements traversing through the suite and its direct children.
986 :param suite: Suite to process.
990 if self.start_suite(suite) is not False:
991 suite.suites.visit(self)
992 suite.tests.visit(self)
993 self.end_suite(suite)
# NOTE(review): gap-sampled extraction — the try: around suite.parent.name,
# the AttributeError fallback value, and the u"doc" entry of the suite dict
# (which presumably stores doc_str) are missing. TODO confirm upstream.
995 def start_suite(self, suite):
996 """Called when suite starts.
998 :param suite: Suite to process.
1004 parent_name = suite.parent.name
1005 except AttributeError:
# Normalise the suite documentation into the report's |br| markup.
1008 doc_str = suite.doc.\
1009 replace(u'"', u"'").\
1010 replace(u'\n', u' ').\
1011 replace(u'\r', u'').\
1012 replace(u'*[', u' |br| *[').\
1013 replace(u"*", u"**").\
1014 replace(u' |br| *[', u'*[', 1)
# Suites are keyed by lowercase long name with spaces replaced by "_".
1016 self._data[u"suites"][suite.longname.lower().
1017 replace(u'"', u"'").
1018 replace(u" ", u"_")] = {
1019 u"name": suite.name.lower(),
1021 u"parent": parent_name,
1022 u"level": len(suite.longname.split(u"."))
1025 suite.keywords.visit(self)
1027 def end_suite(self, suite):
1028 """Called when suite ends.
1030 :param suite: Suite to process.
1035 def visit_test(self, test):
1036 """Implements traversing through the test.
1038 :param test: Test to process.
1042 if self.start_test(test) is not False:
1043 test.keywords.visit(self)
# NOTE(review): gap-sampled extraction — returns after the ignore-list check,
# try: headers, else: branches, the tag_tc assignment, the MRR/BMRR split
# condition and several closing brackets are missing. Kept verbatim; restore
# from upstream before use.
1046 def start_test(self, test):
1047 """Called when test starts.
1049 :param test: Test to process.
1054 self._sh_run_counter = 0
1056 longname_orig = test.longname.lower()
1058 # Check the ignore list
1059 if longname_orig in self._ignore:
1062 tags = [str(tag) for tag in test.tags]
1063 test_result = dict()
1065 # Change the TC long name and name if defined in the mapping table
1066 longname = self._mapping.get(longname_orig, None)
1067 if longname is not None:
1068 name = longname.split(u'.')[-1]
1070 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1074 longname = longname_orig
1075 name = test.name.lower()
1077 # Remove TC number from the TC long name (backward compatibility):
1078 self._test_id = re.sub(
1079 self.REGEX_TC_NUMBER, u"", longname.replace(u"snat", u"nat")
1081 # Remove TC number from the TC name (not needed):
1082 test_result[u"name"] = re.sub(
1083 self.REGEX_TC_NUMBER, "", name.replace(u"snat", u"nat")
1086 test_result[u"parent"] = test.parent.name.lower().\
1087 replace(u"snat", u"nat")
1088 test_result[u"tags"] = tags
1089 test_result["doc"] = test.doc.\
1090 replace(u'"', u"'").\
1091 replace(u'\n', u' ').\
1092 replace(u'\r', u'').\
1093 replace(u'[', u' |br| [').\
1094 replace(u' |br| [', u'[', 1)
1095 test_result[u"type"] = u"FUNC"
1096 test_result[u"status"] = test.status
# First pass: build the human-readable "msg" from the raw robot message,
# choosing the parser by test-type tag.
1098 if test.status == u"PASS":
1099 if u"NDRPDR" in tags:
1100 if u"TCP_PPS" in tags or u"UDP_PPS" in tags:
1101 test_result[u"msg"] = self._get_data_from_pps_test_msg(
1102 test.message).replace(u'\n', u' |br| '). \
1103 replace(u'\r', u'').replace(u'"', u"'")
1104 elif u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1105 test_result[u"msg"] = self._get_data_from_cps_test_msg(
1106 test.message).replace(u'\n', u' |br| '). \
1107 replace(u'\r', u'').replace(u'"', u"'")
1109 test_result[u"msg"] = self._get_data_from_perf_test_msg(
1110 test.message).replace(u'\n', u' |br| ').\
1111 replace(u'\r', u'').replace(u'"', u"'")
1112 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1113 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1114 test.message).replace(u'\n', u' |br| ').\
1115 replace(u'\r', u'').replace(u'"', u"'")
1117 test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
1118 replace(u'\r', u'').replace(u'"', u"'")
1120 test_result[u"msg"] = u"Test Failed."
1122 if u"PERFTEST" in tags:
1123 # Replace info about cores (e.g. -1c-) with the info about threads
1124 # and cores (e.g. -1t1c-) in the long test case names and in the
1125 # test case names if necessary.
1126 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
1130 for tag in test_result[u"tags"]:
1131 groups = re.search(self.REGEX_TC_TAG, tag)
1137 self._test_id = re.sub(
1138 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1139 self._test_id, count=1
1141 test_result[u"name"] = re.sub(
1142 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1143 test_result["name"], count=1
# A test with zero or multiple NtMc tags cannot be renamed; mark FAIL.
1146 test_result[u"status"] = u"FAIL"
1147 self._data[u"tests"][self._test_id] = test_result
1149 f"The test {self._test_id} has no or more than one "
1150 f"multi-threading tags.\n"
1151 f"Tags: {test_result[u'tags']}"
# Second pass: extract typed results; each _get_* also overwrites "status".
1155 if test.status == u"PASS":
1156 if u"DEVICETEST" in tags:
1157 test_result[u"type"] = u"DEVICETEST"
1158 elif u"NDRPDR" in tags:
1159 if u"TCP_CPS" in tags or u"UDP_CPS" in tags:
1160 test_result[u"type"] = u"CPS"
1162 test_result[u"type"] = u"NDRPDR"
1163 test_result[u"throughput"], test_result[u"status"] = \
1164 self._get_ndrpdr_throughput(test.message)
1165 test_result[u"gbps"], test_result[u"status"] = \
1166 self._get_ndrpdr_throughput_gbps(test.message)
1167 test_result[u"latency"], test_result[u"status"] = \
1168 self._get_ndrpdr_latency(test.message)
1169 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1171 test_result[u"type"] = u"MRR"
1173 test_result[u"type"] = u"BMRR"
1175 test_result[u"result"] = dict()
1176 groups = re.search(self.REGEX_BMRR, test.message)
1177 if groups is not None:
1178 items_str = groups.group(1)
1180 float(item.strip()) for item in items_str.split(",")
1182 # Use whole list in CSIT-1180.
1183 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1184 test_result[u"result"][u"receive-rate"] = stats.avg
1185 test_result[u"result"][u"receive-stdev"] = stats.stdev
1187 groups = re.search(self.REGEX_MRR, test.message)
1188 test_result[u"result"][u"receive-rate"] = \
1189 float(groups.group(3)) / float(groups.group(1))
1190 elif u"SOAK" in tags:
1191 test_result[u"type"] = u"SOAK"
1192 test_result[u"throughput"], test_result[u"status"] = \
1193 self._get_plr_throughput(test.message)
1194 elif u"HOSTSTACK" in tags:
1195 test_result[u"type"] = u"HOSTSTACK"
1196 test_result[u"result"], test_result[u"status"] = \
1197 self._get_hoststack_data(test.message, tags)
1198 elif u"TCP" in tags:
1199 test_result[u"type"] = u"TCP"
1200 groups = re.search(self.REGEX_TCP, test.message)
1201 test_result[u"result"] = int(groups.group(2))
1202 elif u"RECONF" in tags:
1203 test_result[u"type"] = u"RECONF"
1204 test_result[u"result"] = None
1206 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1207 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1208 test_result[u"result"] = {
1209 u"loss": int(grps_loss.group(1)),
1210 u"time": float(grps_time.group(1))
1212 except (AttributeError, IndexError, ValueError, TypeError):
1213 test_result[u"status"] = u"FAIL"
1215 test_result[u"status"] = u"FAIL"
1216 self._data[u"tests"][self._test_id] = test_result
1219 self._data[u"tests"][self._test_id] = test_result
1221 def end_test(self, test):
1222 """Called when test ends.
1224 :param test: Test to process.
1229 def visit_keyword(self, keyword):
1230 """Implements traversing through the keyword and its child keywords.
1232 :param keyword: Keyword to process.
1233 :type keyword: Keyword
1236 if self.start_keyword(keyword) is not False:
1237 self.end_keyword(keyword)
1239 def start_keyword(self, keyword):
1240 """Called when keyword starts. Default implementation does nothing.
1242 :param keyword: Keyword to process.
1243 :type keyword: Keyword
1247 if keyword.type == u"setup":
1248 self.visit_setup_kw(keyword)
1249 elif keyword.type == u"teardown":
1250 self.visit_teardown_kw(keyword)
1252 self.visit_test_kw(keyword)
1253 except AttributeError:
1256 def end_keyword(self, keyword):
1257 """Called when keyword ends. Default implementation does nothing.
1259 :param keyword: Keyword to process.
1260 :type keyword: Keyword
1264 def visit_test_kw(self, test_kw):
1265 """Implements traversing through the test keyword and its child
1268 :param test_kw: Keyword to process.
1269 :type test_kw: Keyword
1272 for keyword in test_kw.keywords:
1273 if self.start_test_kw(keyword) is not False:
1274 self.visit_test_kw(keyword)
1275 self.end_test_kw(keyword)
1277 def start_test_kw(self, test_kw):
1278 """Called when test keyword starts. Default implementation does
1281 :param test_kw: Keyword to process.
1282 :type test_kw: Keyword
1285 if test_kw.name.count(u"Show Runtime On All Duts") or \
1286 test_kw.name.count(u"Show Runtime Counters On All Duts") or \
1287 test_kw.name.count(u"Vpp Show Runtime On All Duts"):
1288 self._msg_type = u"test-show-runtime"
1289 self._sh_run_counter += 1
1292 test_kw.messages.visit(self)
1294 def end_test_kw(self, test_kw):
1295 """Called when keyword ends. Default implementation does nothing.
1297 :param test_kw: Keyword to process.
1298 :type test_kw: Keyword
1302 def visit_setup_kw(self, setup_kw):
1303 """Implements traversing through the teardown keyword and its child
1306 :param setup_kw: Keyword to process.
1307 :type setup_kw: Keyword
1310 for keyword in setup_kw.keywords:
1311 if self.start_setup_kw(keyword) is not False:
1312 self.visit_setup_kw(keyword)
1313 self.end_setup_kw(keyword)
1315 def start_setup_kw(self, setup_kw):
1316 """Called when teardown keyword starts. Default implementation does
1319 :param setup_kw: Keyword to process.
1320 :type setup_kw: Keyword
1323 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1324 and not self._version:
1325 self._msg_type = u"vpp-version"
1326 elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1328 self._msg_type = u"dpdk-version"
1329 elif setup_kw.name.count(u"Set Global Variable") \
1330 and not self._timestamp:
1331 self._msg_type = u"timestamp"
1332 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1333 self._msg_type = u"testbed"
1336 setup_kw.messages.visit(self)
    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
1346 def visit_teardown_kw(self, teardown_kw):
1347 """Implements traversing through the teardown keyword and its child
1350 :param teardown_kw: Keyword to process.
1351 :type teardown_kw: Keyword
1354 for keyword in teardown_kw.keywords:
1355 if self.start_teardown_kw(keyword) is not False:
1356 self.visit_teardown_kw(keyword)
1357 self.end_teardown_kw(keyword)
1359 def start_teardown_kw(self, teardown_kw):
1360 """Called when teardown keyword starts
1362 :param teardown_kw: Keyword to process.
1363 :type teardown_kw: Keyword
1367 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1368 # TODO: Remove when not needed:
1369 self._conf_history_lookup_nr = 0
1370 self._msg_type = u"teardown-vat-history"
1371 teardown_kw.messages.visit(self)
1372 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1373 self._conf_history_lookup_nr = 0
1374 self._msg_type = u"teardown-papi-history"
1375 teardown_kw.messages.visit(self)
    def end_teardown_kw(self, teardown_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
1385 def visit_message(self, msg):
1386 """Implements visiting the message.
1388 :param msg: Message to process.
1392 if self.start_message(msg) is not False:
1393 self.end_message(msg)
1395 def start_message(self, msg):
1396 """Called when message starts. Get required information from messages:
1399 :param msg: Message to process.
1404 self.parse_msg[self._msg_type](msg)
    def end_message(self, msg):
        """Called when message ends. Default implementation does nothing.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
1418 The data is extracted from output.xml files generated by Jenkins jobs and
1419 stored in pandas' DataFrames.
1425 (as described in ExecutionChecker documentation)
1427 (as described in ExecutionChecker documentation)
1429 (as described in ExecutionChecker documentation)
1432 def __init__(self, spec):
1435 :param spec: Specification.
1436 :type spec: Specification
1443 self._input_data = pd.Series()
1447 """Getter - Input data.
1449 :returns: Input data
1450 :rtype: pandas.Series
1452 return self._input_data
1454 def metadata(self, job, build):
1455 """Getter - metadata
1457 :param job: Job which metadata we want.
1458 :param build: Build which metadata we want.
1462 :rtype: pandas.Series
1464 return self.data[job][build][u"metadata"]
    def suites(self, job, build):
        """Getter - suites.

        :param job: Job which suites we want.
        :param build: Build which suites we want.
        :type job: str
        :type build: str
        :returns: Suites.
        :rtype: pandas.Series
        """
        return self.data[job][str(build)][u"suites"]
1478 def tests(self, job, build):
1481 :param job: Job which tests we want.
1482 :param build: Build which tests we want.
1486 :rtype: pandas.Series
1488 return self.data[job][build][u"tests"]
1490 def _parse_tests(self, job, build):
1491 """Process data from robot output.xml file and return JSON structured
1494 :param job: The name of job which build output data will be processed.
1495 :param build: The build which output data will be processed.
1498 :returns: JSON data structure.
1507 with open(build[u"file-name"], u'r') as data_file:
1509 result = ExecutionResult(data_file)
1510 except errors.DataError as err:
1512 f"Error occurred while parsing output.xml: {repr(err)}"
1515 checker = ExecutionChecker(metadata, self._cfg.mapping,
1517 result.visit(checker)
    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param pid: PID of the process executing this method.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
        :type job: str
        :type build: dict
        :type repeat: int
        """
        # NOTE(review): this span looks truncated by extraction — several
        # statements (state handling, the enclosing try/logging calls for the
        # f-string fragments below, the returned structure) are missing;
        # verify the body against version control before relying on it.
        logging.info(f"  Processing the job/build: {job}: {build[u'build']}")
        success = download_and_unzip_data_file(self._cfg, job, build, pid)
        # Error-message fragments; the enclosing call is not visible here:
        f"It is not possible to download the input data file from the "
        f"job {job}, build {build[u'build']}, or it is damaged. "
        logging.info(f"  Processing data from build {build[u'build']}")
        data = self._parse_tests(job, build)
        f"Input data file from the job {job}, build "
        f"{build[u'build']} is damaged. Skipped."
        state = u"processed"
        # Remove the downloaded file once parsed; removal failure is logged,
        # not fatal.
        remove(build[u"file-name"])
        except OSError as err:
        f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get(u"time-period", None)
        if timeperiod and data:
        timeperiod = timedelta(int(timeperiod))
        metadata = data.get(u"metadata", None)
        generated = metadata.get(u"generated", None)
        # Timestamp format produced by the report generator.
        generated = dt.strptime(generated, u"%Y%m%d %H:%M")
        if (now - generated) > timeperiod:
        # Remove the data and the file:
        f" The build {job}/{build[u'build']} is "
        f"outdated, will be removed."
        logging.info(u"  Done.")
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """
        # NOTE(review): extraction seems to have dropped lines in this span
        # (closing brackets of the pd.Series literal, error handling for the
        # download result, the `mem_alloc =` assignment); verify against
        # version control.
        logging.info(u"Downloading and parsing input files ...")

        for job, builds in self._cfg.builds.items():
            for build in builds:
                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result[u"build"][u"build"]
                # Convert the parsed dicts into pandas Series keyed by ID.
                data = result[u"data"]
                build_data = pd.Series({
                    u"metadata": pd.Series(
                        list(data[u"metadata"].values()),
                        index=list(data[u"metadata"].keys())
                    u"suites": pd.Series(
                        list(data[u"suites"].values()),
                        index=list(data[u"suites"].keys())
                    u"tests": pd.Series(
                        list(data[u"tests"].values()),
                        index=list(data[u"tests"].keys())
                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                # Build numbers are stored as string keys:
                self._input_data[job][str(build_nr)] = build_data
                self._cfg.set_input_file_name(
                    job, build_nr, result[u"build"][u"file-name"])
                self._cfg.set_input_state(job, build_nr, result[u"state"])
        # Peak memory usage; ru_maxrss is in kB on Linux — TODO confirm the
        # assignment target (presumably mem_alloc) against version control.
        resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
        logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

        logging.info(u"Done.")
    def process_local_file(self, local_file, job=u"local", build_nr=1,
        """Process local XML file given as a command-line parameter.

        :param local_file: The file to process.
        :param job: Job name.
        :param build_nr: Build number.
        :param replace: If True, the information about jobs and builds is
            replaced by the new one, otherwise the new jobs and builds are
            appended.
        :type local_file: str
        :type job: str
        :type build_nr: int
        :type replace: bool
        :raises: PresentationError if an error occurs.
        """
        # NOTE(review): this span appears truncated by extraction (the
        # signature continuation with `replace`, the try: around build-number
        # parsing, the `build = {...}` literal and the data-is-None check are
        # missing); verify against version control.
        if not isfile(local_file):
            raise PresentationError(f"The file {local_file} does not exist.")

        # Derive the build number from the file name, e.g. ".../12.xml" -> 12.
        build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
        except (IndexError, ValueError):

        u"status": u"failed",
        u"file-name": local_file
        self._cfg.builds = dict()
        self._cfg.add_build(job, build)

        logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
        data = self._parse_tests(job, build)
        raise PresentationError(
            f"Error occurred while parsing the file {local_file}"

        build_data = pd.Series({
            u"metadata": pd.Series(
                list(data[u"metadata"].values()),
                index=list(data[u"metadata"].keys())
            u"suites": pd.Series(
                list(data[u"suites"].values()),
                index=list(data[u"suites"].keys())
            u"tests": pd.Series(
                list(data[u"tests"].values()),
                index=list(data[u"tests"].keys())

        if self._input_data.get(job, None) is None:
            self._input_data[job] = pd.Series()
        # Build numbers are stored as string keys:
        self._input_data[job][str(build_nr)] = build_data

        self._cfg.set_input_state(job, build_nr, u"processed")
    def process_local_directory(self, local_dir, replace=True):
        """Process local directory with XML file(s). The directory is processed
        as a 'job' and the XML files in it as builds.
        If the given directory contains only sub-directories, these
        sub-directories processed as jobs and corresponding XML files as builds
        of the job.

        :param local_dir: Local directory to process.
        :param replace: If True, the information about jobs and builds is
            replaced by the new one, otherwise the new jobs and builds are
            appended.
        :type local_dir: str
        :type replace: bool
        """
        # NOTE(review): extraction seems to have dropped the dict/list literal
        # openers (`local_builds = {...}`, `builds = [...]`) and the final
        # else: in this span; verify against version control.
        if not isdir(local_dir):
            raise PresentationError(
                f"The directory {local_dir} does not exist."

        # Check if the given directory includes only files, or only directories
        _, dirnames, filenames = next(walk(local_dir))

        if filenames and not dirnames:
            # key: dir (job) name, value: list of file names (builds)
                local_dir: [join(local_dir, name) for name in filenames]
        elif dirnames and not filenames:
            # key: dir (job) name, value: list of file names (builds)
            local_builds = dict()
            for dirname in dirnames:
                    join(local_dir, dirname, name)
                    for name in listdir(join(local_dir, dirname))
                    if isfile(join(local_dir, dirname, name))
                # Sort so builds get deterministic, ascending numbers below.
                local_builds[dirname] = sorted(builds)
        elif not filenames and not dirnames:
            raise PresentationError(f"The directory {local_dir} is empty.")
            raise PresentationError(
                f"The directory {local_dir} can include only files or only "
                f"directories, not both.\nThe directory {local_dir} includes "
                f"file(s):\n{filenames}\nand directories:\n{dirnames}"

        self._cfg.builds = dict()
        for job, files in local_builds.items():
            for idx, local_file in enumerate(files):
                self.process_local_file(local_file, job, idx + 1, replace=False)
1776 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1777 """Return the index of character in the string which is the end of tag.
1779 :param tag_filter: The string where the end of tag is being searched.
1780 :param start: The index where the searching is stated.
1781 :param closer: The character which is the tag closer.
1782 :type tag_filter: str
1785 :returns: The index of the tag closer.
1789 idx_opener = tag_filter.index(closer, start)
1790 return tag_filter.index(closer, idx_opener + 1)
1795 def _condition(tag_filter):
1796 """Create a conditional statement from the given tag filter.
1798 :param tag_filter: Filter based on tags from the element specification.
1799 :type tag_filter: str
1800 :returns: Conditional statement which can be evaluated.
1805 index = InputData._end_of_tag(tag_filter, index)
1809 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
    def filter_data(self, element, params=None, data=None, data_set=u"tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:
        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
            - test (or suite) n ID:
              ...

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """
        # NOTE(review): this span appears truncated by extraction (the
        # enclosing try:, the cond assignments in the first branches, the
        # data_dict assignment and several error-handling lines are missing);
        # verify the body against version control.
        if data_set == "suites":
        elif element[u"filter"] in (u"all", u"template"):
            cond = InputData._condition(element[u"filter"])
            logging.debug(f"   Filter: {cond}")
            logging.error(u"  No filter defined.")

        params = element.get(u"parameters", None)
            params.append(u"type")

        data_to_filter = data if data else element[u"data"]
        for job, builds in data_to_filter.items():
            data[job] = pd.Series()
            for build in builds:
                data[job][str(build)] = pd.Series()
                    self.data[job][str(build)][data_set].items())
                    if continue_on_error:
                for test_id, test_data in data_dict.items():
                    # NOTE(review): eval() of the tag filter — the filter
                    # comes from the element specification, which must be
                    # trusted input.
                    if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                        data[job][str(build)][test_id] = pd.Series()
                        for param, val in test_data.items():
                            data[job][str(build)][test_id][param] = val
                            for param in params:
                                data[job][str(build)][test_id][param] =\
                                data[job][str(build)][test_id][param] =\
        except (KeyError, IndexError, ValueError) as err:
            f"Missing mandatory parameter in the element specification: "
        except AttributeError as err:
            logging.error(repr(err))
        except SyntaxError as err:
            f"The filter {cond} is not correct. Check if all tags are "
            f"enclosed by apostrophes.\n{repr(err)}"
    def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:
        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
            - test (or suite) n ID:
              ...

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If
            None, all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype pandas.Series
        """
        # NOTE(review): this span appears truncated by extraction (the early
        # return for missing `include`, the enclosing try: blocks and several
        # assignment continuations are missing); verify against VCS.
        include = element.get(u"include", None)
            logging.warning(u"No tests to include, skipping the element.")

        params = element.get(u"parameters", None)
            params.append(u"type")

        for job, builds in element[u"data"].items():
            data[job] = pd.Series()
            for build in builds:
                data[job][str(build)] = pd.Series()
                for test in include:
                        # Test names in `include` are regular expressions
                        # matched case-insensitively against test IDs.
                        reg_ex = re.compile(str(test).lower())
                        for test_id in self.data[job][
                                str(build)][data_set].keys():
                            if re.match(reg_ex, str(test_id).lower()):
                                test_data = self.data[job][
                                    str(build)][data_set][test_id]
                                data[job][str(build)][test_id] = pd.Series()
                                    for param, val in test_data.items():
                                        data[job][str(build)][test_id]\
                                        for param in params:
                                                data[job][str(build)][
                                                data[job][str(build)][
                                                    test_id][param] = u"No Data"
                    except KeyError as err:
                        if continue_on_error:
                            logging.debug(repr(err))
                        logging.error(repr(err))
        except (KeyError, IndexError, ValueError) as err:
            f"Missing mandatory parameter in the element "
            f"specification: {repr(err)}"
        except AttributeError as err:
            logging.error(repr(err))
2005 def merge_data(data):
2006 """Merge data from more jobs and builds to a simple data structure.
2008 The output data structure is:
2010 - test (suite) 1 ID:
2016 - test (suite) n ID:
2019 :param data: Data to merge.
2020 :type data: pandas.Series
2021 :returns: Merged data.
2022 :rtype: pandas.Series
2025 logging.info(u" Merging data ...")
2027 merged_data = pd.Series()
2028 for builds in data.values:
2029 for item in builds.values:
2030 for item_id, item_data in item.items():
2031 merged_data[item_id] = item_data
    def print_all_oper_data(self):
        """Print all operational data to console.

        Walks every parsed build and prints per-DUT, per-thread "show
        runtime" tables using prettytable.
        """
        # NOTE(review): this span appears truncated by extraction (the table
        # header list opener, the row loop and the avg accumulation are
        # missing); verify against version control.
            u"Cycles per Packet",
            u"Average Vector Size"

        for job in self._input_data.values:
            for build in job.values:
                for test_id, test_data in build[u"tests"].items():
                    # Skip tests without operational (show-run) data.
                    if test_data.get(u"show-run", None) is None:
                    for dut_name, data in test_data[u"show-run"].items():
                        if data.get(u"threads", None) is None:
                        print(f"Host IP: {data.get(u'host', '')}, "
                              f"Socket: {data.get(u'socket', '')}")
                        for thread_nr, thread in data[u"threads"].items():
                            txt_table = prettytable.PrettyTable(tbl_hdr)
                                txt_table.add_row(row)
                            if len(thread) == 0:
                                avg = f", Average Vector Size per Node: " \
                                      f"{(avg / len(thread)):.2f}"
                            # Thread 0 is the main thread; others are workers.
                            th_name = u"main" if thread_nr == 0 \
                                else f"worker_{thread_nr}"
                            print(f"{dut_name}, {th_name}{avg}")
                            txt_table.float_format = u".2"
                            txt_table.align = u"r"
                            txt_table.align[u"Name"] = u"l"
                            print(f"{txt_table.get_string()}\n")