1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in pandas' DataFrames,
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
48 # Separator used in file names
52 class ExecutionChecker(ResultVisitor):
53 """Class to traverse through the test suite structure.
55 The functionality implemented in this class generates a json structure:
61 "generated": "Timestamp",
62 "version": "SUT version",
63 "job": "Jenkins job name",
64 "build": "Information about the build"
67 "Suite long name 1": {
69 "doc": "Suite 1 documentation",
70 "parent": "Suite 1 parent",
71 "level": "Level of the suite in the suite hierarchy"
73 "Suite long name N": {
75 "doc": "Suite N documentation",
76 "parent": "Suite N parent",
77 "level": "Level of the suite in the suite hierarchy"
84 "parent": "Name of the parent of the test",
85 "doc": "Test documentation",
86 "msg": "Test message",
87 "conf-history": "DUT1 and DUT2 VAT History",
88 "show-run": "Show Run",
89 "tags": ["tag 1", "tag 2", "tag n"],
91 "status": "PASS" | "FAIL",
137 "parent": "Name of the parent of the test",
138 "doc": "Test documentation",
139 "msg": "Test message",
140 "tags": ["tag 1", "tag 2", "tag n"],
142 "status": "PASS" | "FAIL",
149 "parent": "Name of the parent of the test",
150 "doc": "Test documentation",
151 "msg": "Test message",
152 "tags": ["tag 1", "tag 2", "tag n"],
153 "type": "MRR" | "BMRR",
154 "status": "PASS" | "FAIL",
156 "receive-rate": float,
157 # Average of a list, computed using AvgStdevStats.
158 # In CSIT-1180, replace with List[float].
172 "metadata": { # Optional
173 "version": "VPP version",
174 "job": "Jenkins job name",
175 "build": "Information about the build"
179 "doc": "Suite 1 documentation",
180 "parent": "Suite 1 parent",
181 "level": "Level of the suite in the suite hierarchy"
184 "doc": "Suite N documentation",
185 "parent": "Suite N parent",
186 "level": "Level of the suite in the suite hierarchy"
192 "parent": "Name of the parent of the test",
193 "doc": "Test documentation"
194 "msg": "Test message"
195 "tags": ["tag 1", "tag 2", "tag n"],
196 "conf-history": "DUT1 and DUT2 VAT History"
197 "show-run": "Show Run"
198 "status": "PASS" | "FAIL"
206 .. note:: ID is the lowercase full path to the test.
    # --- Regexes parsing values out of Robot Framework messages ---
    # NOTE(review): this excerpt elides the closing `)` line of most
    # re.compile(...) calls below; restore them before running.

    # PLRsearch (SOAK) lower/upper bound rates.
    REGEX_PLR_RATE = re.compile(
        r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
        r'PLRsearch upper bound::?\s(\d+.\d+)'
    # NDR/PDR lower/upper rates in packets per second.
    REGEX_NDRPDR_RATE = re.compile(
        r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:\s(\d+.\d+).*\n'
        r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:\s(\d+.\d+)'
    # NDR/PDR lower/upper rates, second (Gbps) value on each line.
    REGEX_NDRPDR_GBPS = re.compile(
        r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
        r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
        r'PDR_UPPER:.*,\s(\d+.\d+)'
    # 10 groups: NDR/PDR lower bounds (two values each) plus latency
    # string pairs at 90/50/10% PDR.
    REGEX_PERF_MSG_INFO = re.compile(
        r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
        r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
        r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
    # Single group: the bracketed list of MRR trial results.
    REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')

    # TODO: Remove when not needed
    REGEX_NDRPDR_LAT_BASE = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
    REGEX_NDRPDR_LAT = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    # TODO: Remove when not needed
    REGEX_NDRPDR_LAT_LONG = re.compile(
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
        r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
        r'Latency.*\[\'(.*)\', \'(.*)\'\]'
    # Group 2 is the version string.
    REGEX_VERSION_VPP = re.compile(
        r"(return STDOUT Version:\s*|"
        r"VPP Version:\s*|VPP version:\s*)(.*)"
    REGEX_VERSION_DPDK = re.compile(
        r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
    REGEX_TCP = re.compile(
        r'Total\s(rps|cps|throughput):\s(\d*).*$'
    REGEX_MRR = re.compile(
        r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
        r'tx\s(\d*),\srx\s(\d*)'
    REGEX_BMRR = re.compile(
        r'Maximum Receive Rate trial results'
        r' in packets per second: \[(.*)\]'
    REGEX_RECONF_LOSS = re.compile(
        r'Packets lost due to reconfig: (\d*)'
    REGEX_RECONF_TIME = re.compile(
        r'Implied time lost: (\d*.[\de-]*)'
    # Thread/core tag, e.g. "2T1C".
    REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
    # Old-style "-2t1c-" infix in test names.
    REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
    # New-style "-1c-" infix in test names.
    REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
    # Leading "tcNN-" test-case number prefix.
    REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
    # Group 1: host IP, group 2: socket path of a PAPI CLI message.
    REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
    def __init__(self, metadata, mapping, ignore):
        """Initialise the checker.

        :param metadata: Key-value pairs to be included in "metadata" part of
            the generated structure.
        :param mapping: Mapping of the old names of test cases to the new
            (current) ones.
        :param ignore: List of TCs to be ignored.
        :type metadata: dict
        :type mapping: dict
        :type ignore: list
        """

        # Type of message to parse out from the test messages
        self._msg_type = None

        self._timestamp = None

        # Testbed. The testbed is identified by TG node IP address.
        # NOTE(review): the `self._testbed = None` initialisation is
        # presumably on a line elided from this excerpt — confirm.

        # Mapping of TCs long names
        self._mapping = mapping

        self._ignore = ignore

        # Number of PAPI History messages found:
        # 1 - PAPI History of DUT1
        # 2 - PAPI History of DUT2
        self._conf_history_lookup_nr = 0

        self._sh_run_counter = 0

        # Test ID of currently processed test- the lowercase full path to the
        # test.
        # NOTE(review): the `self._test_id = None` initialisation appears to
        # be elided from this excerpt — confirm.

        # The main data structure
        # NOTE(review): the `self._data = {` opener and closing `}` are
        # elided from this excerpt.
        u"metadata": OrderedDict(),
        u"suites": OrderedDict(),
        u"tests": OrderedDict()

        # Save the provided metadata
        for key, val in metadata.items():
            self._data[u"metadata"][key] = val

        # Dictionary defining the methods used to parse different types of
        # messages, keyed by self._msg_type.
        # NOTE(review): the `self.parse_msg = {` opener and closing `}` are
        # elided from this excerpt.
        u"timestamp": self._get_timestamp,
        u"vpp-version": self._get_vpp_version,
        u"dpdk-version": self._get_dpdk_version,
        # TODO: Remove when not needed:
        u"teardown-vat-history": self._get_vat_history,
        u"teardown-papi-history": self._get_papi_history,
        u"test-show-runtime": self._get_show_run,
        u"testbed": self._get_testbed
362 """Getter - Data parsed from the XML file.
364 :returns: Data parsed from the XML file.
    def _get_data_from_mrr_test_msg(self, msg):
        """Get info from message of MRR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or original message if a problem occurs.
        :rtype: str
        """
        groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
        if not groups or groups.lastindex != 1:
            return u"Test Failed."

        # NOTE(review): the `try:` opener for this block is elided from this
        # excerpt.
        data = groups.group(1).split(u", ")
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        # Each trial value is converted from pps to Mpps with 2 decimals.
        # NOTE(review): the `out_str` initialisation, `try:` and the
        # `for item in data:` loop header are elided from this excerpt.
        out_str += f"{(float(item) / 1e6):.2f}, "
        return out_str[:-2] + u"]"
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."
    def _get_data_from_perf_test_msg(self, msg):
        """Get info from message of NDRPDR performance tests.

        :param msg: Message to be processed.
        :type msg: str
        :returns: Processed message or original message if a problem occurs.
        :rtype: str

        NOTE(review): several interior lines (dict opener/closer, `try:`
        openers, `return`s inside `_process_lat`, output assembly lines) are
        elided from this excerpt; dangling fragments below are preserved
        verbatim.
        """
        groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
        if not groups or groups.lastindex != 10:
            return u"Test Failed."

        # Collect the ten captured groups into a dict (opener elided).
        u"ndr_low": float(groups.group(1)),
        u"ndr_low_b": float(groups.group(2)),
        u"pdr_low": float(groups.group(3)),
        u"pdr_low_b": float(groups.group(4)),
        u"pdr_lat_90_1": groups.group(5),
        u"pdr_lat_90_2": groups.group(6),
        u"pdr_lat_50_1": groups.group(7),
        u"pdr_lat_50_2": groups.group(8),
        u"pdr_lat_10_1": groups.group(9),
        u"pdr_lat_10_2": groups.group(10),
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."

        def _process_lat(in_str_1, in_str_2):
            """Extract min, avg, max values from latency string.

            :param in_str_1: Latency string for one direction produced by robot
                framework.
            :param in_str_2: Latency string for second direction produced by
                robot framework.
            :type in_str_1: str
            :type in_str_2: str
            :returns: Processed latency string or None if a problem occurs.
            """
            in_list_1 = in_str_1.split('/', 3)
            in_list_2 = in_str_2.split('/', 3)

            if len(in_list_1) != 4 and len(in_list_2) != 4:
            # Pad base64 hdrh blobs to a multiple of 4 before decoding.
            in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
            hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
            except hdrh.codec.HdrLengthException:
            in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
            hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
            except hdrh.codec.HdrLengthException:
            # Report P50/P90/P99 for both directions.
            if hdr_lat_1 and hdr_lat_2:
                hdr_lat_1.get_value_at_percentile(50.0),
                hdr_lat_1.get_value_at_percentile(90.0),
                hdr_lat_1.get_value_at_percentile(99.0),
                hdr_lat_2.get_value_at_percentile(50.0),
                hdr_lat_2.get_value_at_percentile(90.0),
                hdr_lat_2.get_value_at_percentile(99.0)

        # Assemble the human-readable output (surrounding lines elided).
        f"1. {(data[u'ndr_low'] / 1e6):5.2f} "
        f"{data[u'ndr_low_b']:5.2f}"
        f"\n2. {(data[u'pdr_low'] / 1e6):5.2f} "
        f"{data[u'pdr_low_b']:5.2f}"
        _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
        _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
        _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
        # Column width: widest latency value, but at least 4 characters.
        max_len = len(str(max((max(item) for item in latency))))
        max_len = 4 if max_len < 4 else max_len

        for idx, lat in enumerate(latency):
        # NOTE(review): the `:68,162d` format specs below look like
        # sampling damage; upstream presumably uses `:{max_len}d` — confirm.
        f"{lat[0]:68,162d} "
        f"{lat[1]:68,162d} "
        f"{lat[2]:68,162d} "
        f"{lat[3]:68,162d} "
        f"{lat[4]:68,162d} "
        f"{lat[5]:68,162d} "
        except (AttributeError, IndexError, ValueError, KeyError):
            return u"Test Failed."
    def _get_testbed(self, msg):
        """Called when extraction of testbed IP is required.
        The testbed is identified by TG node IP address.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"Setup of TG node") or \
                msg.message.count(u"Setup of node TG host"):
            reg_tg_ip = re.compile(
                r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
            # NOTE(review): the `try:` opener for this search is elided from
            # this excerpt.
            self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
            except (KeyError, ValueError, IndexError, AttributeError):
            self._data[u"metadata"][u"testbed"] = self._testbed
            # Stop parsing further messages of this type.
            self._msg_type = None
    def _get_vpp_version(self, msg):
        """Called when extraction of VPP version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"return STDOUT Version:") or \
                msg.message.count(u"VPP Version:") or \
                msg.message.count(u"VPP version:"):
            # NOTE(review): the continuation line (presumably `group(2))`)
            # is elided from this excerpt — restore before running.
            self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None
    def _get_dpdk_version(self, msg):
        """Called when extraction of DPDK version is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"DPDK Version:"):
            # NOTE(review): the `try:` opener and matching `except` are
            # elided from this excerpt.
            self._version = str(re.search(
                self.REGEX_VERSION_DPDK, msg.message).group(2))
            self._data[u"metadata"][u"version"] = self._version
            self._msg_type = None
    def _get_timestamp(self, msg):
        """Called when extraction of timestamp is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        # Keep only "YYYYMMDD HH:MM" — the first 14 characters.
        self._timestamp = msg.timestamp[:14]
        self._data[u"metadata"][u"generated"] = self._timestamp
        self._msg_type = None
    def _get_vat_history(self, msg):
        """Called when extraction of VAT command history is required.

        TODO: Remove when not needed.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"VAT command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            # NOTE(review): an `else:` line setting self._msg_type appears to
            # be partially elided here — confirm against upstream.
                self._msg_type = None
            # Strip the "<ip> VAT command history:" header and join lines
            # with the |br| marker used by the report generator.
            text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          r"VAT command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
    def _get_papi_history(self, msg):
        """Called when extraction of PAPI command history is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.
        """
        if msg.message.count(u"PAPI command history:"):
            self._conf_history_lookup_nr += 1
            if self._conf_history_lookup_nr == 1:
                self._data[u"tests"][self._test_id][u"conf-history"] = str()
            # NOTE(review): an `else:` line setting self._msg_type appears to
            # be partially elided here — confirm against upstream.
                self._msg_type = None
            # Strip the "<ip> PAPI command history:" header; mirror of
            # _get_vat_history above.
            text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
                          r"PAPI command history:", u"",
                          msg.message, count=1).replace(u'\n', u' |br| ').\
            self._data[u"tests"][self._test_id][u"conf-history"] += (
                f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
    def _get_show_run(self, msg):
        """Called when extraction of VPP operational data (output of CLI command
        Show Runtime) is required.

        :param msg: Message to process.
        :type msg: Message
        :returns: Nothing.

        NOTE(review): several interior lines (`return`s, `try:` openers,
        `else:` branches, the `oper = {` opener and list items) are elided
        from this excerpt; fragments below are preserved verbatim.
        """
        if not msg.message.count(u"stats runtime"):

        # Process the message only once per test (counter is incremented per
        # matching keyword in start_test_kw).
        if self._sh_run_counter > 1:

        if u"show-run" not in self._data[u"tests"][self._test_id].keys():
            self._data[u"tests"][self._test_id][u"show-run"] = dict()

        groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
        host = groups.group(1)
        except (AttributeError, IndexError):
        sock = groups.group(2)
        except (AttributeError, IndexError):

        # The message payload is a python-repr-like dict; normalise it to
        # strict JSON before loads().
        runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
                        replace(u"'", u'"').replace(u'b"', u'"').
                        replace(u'u"', u'"').split(u":", 1)[1])

        threads_nr = len(runtime[0][u"clocks"])
        except (IndexError, KeyError):

        # DUT name derived from how many DUTs were already recorded.
        dut = u"DUT{nr}".format(
            nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)

        u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})

        for idx in range(threads_nr):
            # Clocks per unit of work: prefer vectors, then calls, then
            # suspends as the divisor.
            if item[u"vectors"][idx] > 0:
                clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
            elif item[u"calls"][idx] > 0:
                clocks = item[u"clocks"][idx] / item[u"calls"][idx]
            elif item[u"suspends"][idx] > 0:
                clocks = item[u"clocks"][idx] / item[u"suspends"][idx]

            if item[u"calls"][idx] > 0:
                vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]

            # Record the node only if it did any work on this thread.
            if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
                    int(item[u"suspends"][idx]):
                oper[u"threads"][idx].append([
                    item[u"vectors"][idx],
                    item[u"suspends"][idx],

        self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
    def _get_ndrpdr_throughput(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        # -1.0 marks "not found" for each bound.
        # NOTE(review): the `throughput = {` opener/closer and the
        # `status = u"FAIL"` initialisation are elided from this excerpt.
        u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
        u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}

        groups = re.search(self.REGEX_NDRPDR_RATE, msg)
        if groups is not None:
            # NOTE(review): `try:` opener and `status = u"PASS"` line elided.
            throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
            throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
            throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
            throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
            except (IndexError, ValueError):

        return throughput, status
    def _get_ndrpdr_throughput_gbps(self, msg):
        """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
        test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        # Mirror of _get_ndrpdr_throughput, but reads the Gbps values.
        # NOTE(review): the `gbps = {` opener/closer, `status` init, `try:`
        # and the final `return gbps, status` are elided from this excerpt.
        u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
        u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}

        groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
        if groups is not None:
            gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
            gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
            gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
            gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
            except (IndexError, ValueError):
    def _get_plr_throughput(self, msg):
        """Get PLRsearch lower bound and PLRsearch upper bound from the test
        message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)
        """
        # NOTE(review): the `throughput = {...}` initialisation, `status`
        # init, `try:` opener and `status = u"PASS"` are elided from this
        # excerpt.
        groups = re.search(self.REGEX_PLR_RATE, msg)
        if groups is not None:
            throughput[u"LOWER"] = float(groups.group(1))
            throughput[u"UPPER"] = float(groups.group(2))
            except (IndexError, ValueError):

        return throughput, status
    def _get_ndrpdr_latency(self, msg):
        """Get LATENCY from the test message.

        :param msg: The test message to be parsed.
        :type msg: str
        :returns: Parsed data as a dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)

        NOTE(review): interior lines (the `latency_default` definition, the
        `latency = {` opener with its "NDR"/"PDR"/... keys, `try:` openers
        and `if groups is None:` fallthroughs between the three regex
        attempts) are elided from this excerpt; fragments preserved verbatim.
        """
        # Six per-percentile entries, each with two directions.
        u"direction1": copy.copy(latency_default),
        u"direction2": copy.copy(latency_default)
        u"direction1": copy.copy(latency_default),
        u"direction2": copy.copy(latency_default)
        u"direction1": copy.copy(latency_default),
        u"direction2": copy.copy(latency_default)
        u"direction1": copy.copy(latency_default),
        u"direction2": copy.copy(latency_default)
        u"direction1": copy.copy(latency_default),
        u"direction2": copy.copy(latency_default)
        u"direction1": copy.copy(latency_default),
        u"direction2": copy.copy(latency_default)

        # Try the newest format first, fall back to older ones.
        # TODO: Rewrite when long and base are not needed
        groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
        groups = re.search(self.REGEX_NDRPDR_LAT, msg)
        groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
        return latency, u"FAIL"

        def process_latency(in_str):
            """Return object with parsed latency values.

            TODO: Define class for the return type.

            :param in_str: Input string, min/avg/max/hdrh format.
            :type in_str: str
            :returns: Dict with corresponding keys, except hdrh float values.
            :rtype: dict
            :throws IndexError: If in_str does not have enough substrings.
            :throws ValueError: If a substring does not convert to float.
            """
            in_list = in_str.split('/', 3)
            u"min": float(in_list[0]),
            u"avg": float(in_list[1]),
            u"max": float(in_list[2]),
            # hdrh blob is optional fourth component.
            if len(in_list) == 4:
                rval[u"hdrh"] = str(in_list[3])

        # Base format: 4 groups (NDR/PDR, two directions each).
        latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
        latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
        latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
        latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
        if groups.lastindex == 4:
            return latency, u"PASS"
        except (IndexError, ValueError):

        # Current format: 12 groups (adds PDR90/PDR50/PDR10/LAT0).
        latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
        latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
        latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
        latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
        latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
        latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
        latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
        latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
        if groups.lastindex == 12:
            return latency, u"PASS"
        except (IndexError, ValueError):

        # Long (legacy) format: 18 groups, including NDR10/NDR50/NDR90.
        # TODO: Remove when not needed
        latency[u"NDR10"] = {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        latency[u"NDR50"] = {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        latency[u"NDR90"] = {
            u"direction1": copy.copy(latency_default),
            u"direction2": copy.copy(latency_default)
        latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
        latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
        latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
        latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
        latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
        latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
        latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
        latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
        latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
        latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
        latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
        latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
        latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
        latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
        return latency, u"PASS"
        except (IndexError, ValueError):

        return latency, u"FAIL"
    def _get_hoststack_data(msg, tags):
        """Get data from the hoststack test message.

        :param msg: The test message to be parsed.
        :param tags: Test tags.
        :type msg: str
        :type tags: list
        :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
        :rtype: tuple(dict, str)

        NOTE(review): the `@staticmethod` decorator, `result`/`status`
        initialisations, `try:` openers and the success-path assignments are
        elided from this excerpt.
        """
        # Normalise python-repr quoting to JSON.
        msg = msg.replace(u"'", u'"').replace(u" ", u"")
        if u"LDPRELOAD" in tags:
            except JSONDecodeError:
        elif u"VPPECHO" in tags:
            # Echo tests emit two concatenated JSON objects (client, server).
            msg_lst = msg.replace(u"}{", u"} {").split(u" ")
            client=loads(msg_lst[0]),
            server=loads(msg_lst[1])
            except (JSONDecodeError, IndexError):

        return result, status
946 def visit_suite(self, suite):
947 """Implements traversing through the suite and its direct children.
949 :param suite: Suite to process.
953 if self.start_suite(suite) is not False:
954 suite.suites.visit(self)
955 suite.tests.visit(self)
956 self.end_suite(suite)
    def start_suite(self, suite):
        """Called when suite starts.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
        # NOTE(review): the `try:` opener and the `except` fallback value for
        # parent_name are elided from this excerpt.
        parent_name = suite.parent.name
        except AttributeError:

        # Normalise the suite documentation for the report generator
        # (|br| markers, bold markup).
        doc_str = suite.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'*[', u' |br| *[').\
            replace(u"*", u"**").\
            replace(u' |br| *[', u'*[', 1)

        # Keyed by lowercase dotted long name with spaces replaced.
        # NOTE(review): a `"doc": doc_str,` entry appears to be elided here.
        self._data[u"suites"][suite.longname.lower().
                              replace(u" ", u"_")] = {
            u"name": suite.name.lower(),
            u"parent": parent_name,
            u"level": len(suite.longname.split(u"."))

        suite.keywords.visit(self)
    def end_suite(self, suite):
        """Called when suite ends.

        :param suite: Suite to process.
        :type suite: Suite
        :returns: Nothing.
        """
    def visit_test(self, test):
        """Implements traversing through the test.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
        if self.start_test(test) is not False:
            test.keywords.visit(self)
            # NOTE(review): a `self.end_test(test)` call appears to be elided
            # from this excerpt — confirm against upstream.
    def start_test(self, test):
        """Called when test starts.

        Builds the per-test result dict (name, parent, doc, tags, msg, type,
        status and type-specific results) and stores it under self._test_id.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.

        NOTE(review): numerous interior lines (`return`s, `else:` branches,
        `try:` openers, list/dict closers, error-logging wrappers) are elided
        from this excerpt; fragments are preserved verbatim.
        """
        self._sh_run_counter = 0

        longname_orig = test.longname.lower()

        # Check the ignore list
        if longname_orig in self._ignore:

        tags = [str(tag) for tag in test.tags]
        test_result = dict()

        # Change the TC long name and name if defined in the mapping table
        longname = self._mapping.get(longname_orig, None)
        if longname is not None:
            name = longname.split(u'.')[-1]
            f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
        # else: fall back to the original names (branch header elided).
        longname = longname_orig
        name = test.name.lower()

        # Remove TC number from the TC long name (backward compatibility):
        self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
        # Remove TC number from the TC name (not needed):
        test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)

        test_result[u"parent"] = test.parent.name.lower()
        test_result[u"tags"] = tags
        test_result["doc"] = test.doc.\
            replace(u'"', u"'").\
            replace(u'\n', u' ').\
            replace(u'\r', u'').\
            replace(u'[', u' |br| [').\
            replace(u' |br| [', u'[', 1)
        test_result[u"type"] = u"FUNC"
        test_result[u"status"] = test.status

        # Build the human-readable message per test type.
        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                test_result[u"msg"] = self._get_data_from_perf_test_msg(
                    test.message).replace(u'\n', u' |br| ').\
                    replace(u'\r', u'').replace(u'"', u"'")
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"msg"] = self._get_data_from_mrr_test_msg(
                    test.message).replace(u'\n', u' |br| ').\
                    replace(u'\r', u'').replace(u'"', u"'")
                test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
                    replace(u'\r', u'').replace(u'"', u"'")
            test_result[u"msg"] = u"Test Failed."

        if u"PERFTEST" in tags:
            # Replace info about cores (e.g. -1c-) with the info about threads
            # and cores (e.g. -1t1c-) in the long test case names and in the
            # test case names if necessary.
            groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
            for tag in test_result[u"tags"]:
                groups = re.search(self.REGEX_TC_TAG, tag)
                self._test_id = re.sub(
                    self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                    self._test_id, count=1
                test_result[u"name"] = re.sub(
                    self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
                    test_result["name"], count=1
                # Missing/ambiguous threading tag: mark the test FAILed.
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result
                f"The test {self._test_id} has no or more than one "
                f"multi-threading tags.\n"
                f"Tags: {test_result[u'tags']}"

        # Extract type-specific results from the test message.
        if test.status == u"PASS":
            if u"NDRPDR" in tags:
                test_result[u"type"] = u"NDRPDR"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput(test.message)
                test_result[u"gbps"], test_result[u"status"] = \
                    self._get_ndrpdr_throughput_gbps(test.message)
                test_result[u"latency"], test_result[u"status"] = \
                    self._get_ndrpdr_latency(test.message)
            elif u"SOAK" in tags:
                test_result[u"type"] = u"SOAK"
                test_result[u"throughput"], test_result[u"status"] = \
                    self._get_plr_throughput(test.message)
            elif u"HOSTSTACK" in tags:
                test_result[u"type"] = u"HOSTSTACK"
                test_result[u"result"], test_result[u"status"] = \
                    self._get_hoststack_data(test.message, tags)
            elif u"TCP" in tags:
                test_result[u"type"] = u"TCP"
                groups = re.search(self.REGEX_TCP, test.message)
                test_result[u"result"] = int(groups.group(2))
            elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
                test_result[u"type"] = u"MRR"
                test_result[u"type"] = u"BMRR"

                test_result[u"result"] = dict()
                groups = re.search(self.REGEX_BMRR, test.message)
                if groups is not None:
                    items_str = groups.group(1)
                    float(item.strip()) for item in items_str.split(",")
                    # Use whole list in CSIT-1180.
                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
                    test_result[u"result"][u"receive-rate"] = stats.avg
                    test_result[u"result"][u"receive-stdev"] = stats.stdev
                    # Legacy MRR message: rx total divided by trial duration.
                    groups = re.search(self.REGEX_MRR, test.message)
                    test_result[u"result"][u"receive-rate"] = \
                        float(groups.group(3)) / float(groups.group(1))
            elif u"RECONF" in tags:
                test_result[u"type"] = u"RECONF"
                test_result[u"result"] = None
                grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
                grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
                test_result[u"result"] = {
                    u"loss": int(grps_loss.group(1)),
                    u"time": float(grps_time.group(1))
                except (AttributeError, IndexError, ValueError, TypeError):
                    test_result[u"status"] = u"FAIL"
            elif u"DEVICETEST" in tags:
                test_result[u"type"] = u"DEVICETEST"
                test_result[u"status"] = u"FAIL"
                self._data[u"tests"][self._test_id] = test_result

        self._data[u"tests"][self._test_id] = test_result
    def end_test(self, test):
        """Called when test ends.

        :param test: Test to process.
        :type test: Test
        :returns: Nothing.
        """
1175 def visit_keyword(self, keyword):
1176 """Implements traversing through the keyword and its child keywords.
1178 :param keyword: Keyword to process.
1179 :type keyword: Keyword
1182 if self.start_keyword(keyword) is not False:
1183 self.end_keyword(keyword)
    def start_keyword(self, keyword):
        """Called when keyword starts. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
        # Dispatch to the matching visitor; keywords without a type attribute
        # are ignored via the AttributeError handler.
        # NOTE(review): the `try:` opener and the test-branch condition line
        # before visit_test_kw are elided from this excerpt.
        if keyword.type == u"setup":
            self.visit_setup_kw(keyword)
        elif keyword.type == u"teardown":
            self.visit_teardown_kw(keyword)
            self.visit_test_kw(keyword)
        except AttributeError:
    def end_keyword(self, keyword):
        """Called when keyword ends. Default implementation does nothing.

        :param keyword: Keyword to process.
        :type keyword: Keyword
        :returns: Nothing.
        """
    def visit_test_kw(self, test_kw):
        """Implements traversing through the test keyword and its child
        keywords.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        # Recurse depth-first through child keywords.
        for keyword in test_kw.keywords:
            if self.start_test_kw(keyword) is not False:
                self.visit_test_kw(keyword)
                self.end_test_kw(keyword)
    def start_test_kw(self, test_kw):
        """Called when test keyword starts. Default implementation does
        nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
        # Only "Show Runtime" keywords are interesting; several historic
        # keyword names are matched.
        if test_kw.name.count(u"Show Runtime On All Duts") or \
                test_kw.name.count(u"Show Runtime Counters On All Duts") or \
                test_kw.name.count(u"Vpp Show Runtime On All Duts"):
            self._msg_type = u"test-show-runtime"
            self._sh_run_counter += 1
            # NOTE(review): an `else: return` guard appears to be elided here.
            test_kw.messages.visit(self)
    def end_test_kw(self, test_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param test_kw: Keyword to process.
        :type test_kw: Keyword
        :returns: Nothing.
        """
    def visit_setup_kw(self, setup_kw):
        """Implements traversing through the setup keyword and its child
        keywords.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        for keyword in setup_kw.keywords:
            if self.start_setup_kw(keyword) is not False:
                self.visit_setup_kw(keyword)
                self.end_setup_kw(keyword)
    def start_setup_kw(self, setup_kw):
        """Called when setup keyword starts. Default implementation does
        nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
        # Each branch arms self._msg_type only until the corresponding value
        # has been captured once (guarded by `not self._...`).
        # NOTE(review): the `not self._version` part of the DPDK condition is
        # on a line elided from this excerpt.
        if setup_kw.name.count(u"Show Vpp Version On All Duts") \
                and not self._version:
            self._msg_type = u"vpp-version"
        elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
            self._msg_type = u"dpdk-version"
        elif setup_kw.name.count(u"Set Global Variable") \
                and not self._timestamp:
            self._msg_type = u"timestamp"
        elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
            self._msg_type = u"testbed"
            # NOTE(review): an `else: return` guard appears to be elided here.
            setup_kw.messages.visit(self)
    def end_setup_kw(self, setup_kw):
        """Called when keyword ends. Default implementation does nothing.

        :param setup_kw: Keyword to process.
        :type setup_kw: Keyword
        :returns: Nothing.
        """
    def visit_teardown_kw(self, teardown_kw):
        """Implements traversing through the teardown keyword and its child
        keywords.

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        for keyword in teardown_kw.keywords:
            if self.start_teardown_kw(keyword) is not False:
                self.visit_teardown_kw(keyword)
                self.end_teardown_kw(keyword)
    def start_teardown_kw(self, teardown_kw):
        """Called when teardown keyword starts

        :param teardown_kw: Keyword to process.
        :type teardown_kw: Keyword
        :returns: Nothing.
        """
        # Reset the per-DUT history counter before visiting messages so
        # _get_vat_history / _get_papi_history number DUTs from 1.
        if teardown_kw.name.count(u"Show Vat History On All Duts"):
            # TODO: Remove when not needed:
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-vat-history"
            teardown_kw.messages.visit(self)
        elif teardown_kw.name.count(u"Show Papi History On All Duts"):
            self._conf_history_lookup_nr = 0
            self._msg_type = u"teardown-papi-history"
            teardown_kw.messages.visit(self)
1323 def end_teardown_kw(self, teardown_kw):
1324 """Called when keyword ends. Default implementation does nothing.
1326 :param teardown_kw: Keyword to process.
1327 :type teardown_kw: Keyword
1331 def visit_message(self, msg):
1332 """Implements visiting the message.
1334 :param msg: Message to process.
1338 if self.start_message(msg) is not False:
1339 self.end_message(msg)
1341 def start_message(self, msg):
1342 """Called when message starts. Get required information from messages:
1345 :param msg: Message to process.
1350 self.parse_msg[self._msg_type](msg)
1352 def end_message(self, msg):
1353 """Called when message ends. Default implementation does nothing.
1355 :param msg: Message to process.
1364 The data is extracted from output.xml files generated by Jenkins jobs and
1365 stored in pandas' DataFrames.
1371 (as described in ExecutionChecker documentation)
1373 (as described in ExecutionChecker documentation)
1375 (as described in ExecutionChecker documentation)
1378 def __init__(self, spec):
1381 :param spec: Specification.
1382 :type spec: Specification
1389 self._input_data = pd.Series()
1393 """Getter - Input data.
1395 :returns: Input data
1396 :rtype: pandas.Series
1398 return self._input_data
1400 def metadata(self, job, build):
1401 """Getter - metadata
1403 :param job: Job which metadata we want.
1404 :param build: Build which metadata we want.
1408 :rtype: pandas.Series
1410 return self.data[job][build][u"metadata"]
1412 def suites(self, job, build):
1415 :param job: Job which suites we want.
1416 :param build: Build which suites we want.
1420 :rtype: pandas.Series
1422 return self.data[job][str(build)][u"suites"]
1424 def tests(self, job, build):
1427 :param job: Job which tests we want.
1428 :param build: Build which tests we want.
1432 :rtype: pandas.Series
1434 return self.data[job][build][u"tests"]
1436 def _parse_tests(self, job, build):
1437 """Process data from robot output.xml file and return JSON structured
1440 :param job: The name of job which build output data will be processed.
1441 :param build: The build which output data will be processed.
1444 :returns: JSON data structure.
1453 with open(build[u"file-name"], u'r') as data_file:
1455 result = ExecutionResult(data_file)
1456 except errors.DataError as err:
1458 f"Error occurred while parsing output.xml: {repr(err)}"
1461 checker = ExecutionChecker(metadata, self._cfg.mapping,
1463 result.visit(checker)
1467 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1468 """Download and parse the input data file.
1470 :param pid: PID of the process executing this method.
1471 :param job: Name of the Jenkins job which generated the processed input
1473 :param build: Information about the Jenkins build which generated the
1474 processed input file.
1475 :param repeat: Repeat the download specified number of times if not
1483 logging.info(f" Processing the job/build: {job}: {build[u'build']}")
1490 success = download_and_unzip_data_file(self._cfg, job, build, pid)
1496 f"It is not possible to download the input data file from the "
1497 f"job {job}, build {build[u'build']}, or it is damaged. "
1501 logging.info(f" Processing data from build {build[u'build']}")
1502 data = self._parse_tests(job, build)
1505 f"Input data file from the job {job}, build "
1506 f"{build[u'build']} is damaged. Skipped."
1509 state = u"processed"
1512 remove(build[u"file-name"])
1513 except OSError as err:
1515 f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1518 # If the time-period is defined in the specification file, remove all
1519 # files which are outside the time period.
1521 timeperiod = self._cfg.input.get(u"time-period", None)
1522 if timeperiod and data:
1524 timeperiod = timedelta(int(timeperiod))
1525 metadata = data.get(u"metadata", None)
1527 generated = metadata.get(u"generated", None)
1529 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1530 if (now - generated) > timeperiod:
1531 # Remove the data and the file:
1536 f" The build {job}/{build[u'build']} is "
1537 f"outdated, will be removed."
1539 logging.info(u" Done.")
1549 def download_and_parse_data(self, repeat=1):
1550 """Download the input data files, parse input data from input files and
1551 store in pandas' Series.
1553 :param repeat: Repeat the download specified number of times if not
1558 logging.info(u"Downloading and parsing input files ...")
1560 for job, builds in self._cfg.builds.items():
1561 for build in builds:
1563 result = self._download_and_parse_build(job, build, repeat)
1566 build_nr = result[u"build"][u"build"]
1569 data = result[u"data"]
1570 build_data = pd.Series({
1571 u"metadata": pd.Series(
1572 list(data[u"metadata"].values()),
1573 index=list(data[u"metadata"].keys())
1575 u"suites": pd.Series(
1576 list(data[u"suites"].values()),
1577 index=list(data[u"suites"].keys())
1579 u"tests": pd.Series(
1580 list(data[u"tests"].values()),
1581 index=list(data[u"tests"].keys())
1585 if self._input_data.get(job, None) is None:
1586 self._input_data[job] = pd.Series()
1587 self._input_data[job][str(build_nr)] = build_data
1589 self._cfg.set_input_file_name(
1590 job, build_nr, result[u"build"][u"file-name"])
1592 self._cfg.set_input_state(job, build_nr, result[u"state"])
1595 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1596 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1598 logging.info(u"Done.")
1600 def process_local_file(self, local_file, job=u"local", build_nr=1,
1602 """Process local XML file given as a command-line parameter.
1604 :param local_file: The file to process.
1605 :param job: Job name.
1606 :param build_nr: Build number.
1607 :param replace: If True, the information about jobs and builds is
1608 replaced by the new one, otherwise the new jobs and builds are
1610 :type local_file: str
1614 :raises: PresentationError if an error occurs.
1616 if not isfile(local_file):
1617 raise PresentationError(f"The file {local_file} does not exist.")
1620 build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1621 except (IndexError, ValueError):
1626 u"status": u"failed",
1627 u"file-name": local_file
1630 self._cfg.builds = dict()
1631 self._cfg.add_build(job, build)
1633 logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1634 data = self._parse_tests(job, build)
1636 raise PresentationError(
1637 f"Error occurred while parsing the file {local_file}"
1640 build_data = pd.Series({
1641 u"metadata": pd.Series(
1642 list(data[u"metadata"].values()),
1643 index=list(data[u"metadata"].keys())
1645 u"suites": pd.Series(
1646 list(data[u"suites"].values()),
1647 index=list(data[u"suites"].keys())
1649 u"tests": pd.Series(
1650 list(data[u"tests"].values()),
1651 index=list(data[u"tests"].keys())
1655 if self._input_data.get(job, None) is None:
1656 self._input_data[job] = pd.Series()
1657 self._input_data[job][str(build_nr)] = build_data
1659 self._cfg.set_input_state(job, build_nr, u"processed")
1661 def process_local_directory(self, local_dir, replace=True):
1662 """Process local directory with XML file(s). The directory is processed
1663 as a 'job' and the XML files in it as builds.
1664 If the given directory contains only sub-directories, these
1665 sub-directories processed as jobs and corresponding XML files as builds
1668 :param local_dir: Local directory to process.
1669 :param replace: If True, the information about jobs and builds is
1670 replaced by the new one, otherwise the new jobs and builds are
1672 :type local_dir: str
1675 if not isdir(local_dir):
1676 raise PresentationError(
1677 f"The directory {local_dir} does not exist."
1680 # Check if the given directory includes only files, or only directories
1681 _, dirnames, filenames = next(walk(local_dir))
1683 if filenames and not dirnames:
1686 # key: dir (job) name, value: list of file names (builds)
1688 local_dir: [join(local_dir, name) for name in filenames]
1691 elif dirnames and not filenames:
1694 # key: dir (job) name, value: list of file names (builds)
1695 local_builds = dict()
1696 for dirname in dirnames:
1698 join(local_dir, dirname, name)
1699 for name in listdir(join(local_dir, dirname))
1700 if isfile(join(local_dir, dirname, name))
1703 local_builds[dirname] = sorted(builds)
1705 elif not filenames and not dirnames:
1706 raise PresentationError(f"The directory {local_dir} is empty.")
1708 raise PresentationError(
1709 f"The directory {local_dir} can include only files or only "
1710 f"directories, not both.\nThe directory {local_dir} includes "
1711 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1715 self._cfg.builds = dict()
1717 for job, files in local_builds.items():
1718 for idx, local_file in enumerate(files):
1719 self.process_local_file(local_file, job, idx + 1, replace=False)
1722 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1723 """Return the index of character in the string which is the end of tag.
1725 :param tag_filter: The string where the end of tag is being searched.
1726 :param start: The index where the searching is stated.
1727 :param closer: The character which is the tag closer.
1728 :type tag_filter: str
1731 :returns: The index of the tag closer.
1735 idx_opener = tag_filter.index(closer, start)
1736 return tag_filter.index(closer, idx_opener + 1)
1741 def _condition(tag_filter):
1742 """Create a conditional statement from the given tag filter.
1744 :param tag_filter: Filter based on tags from the element specification.
1745 :type tag_filter: str
1746 :returns: Conditional statement which can be evaluated.
1751 index = InputData._end_of_tag(tag_filter, index)
1755 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
1757 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1758 continue_on_error=False):
1759 """Filter required data from the given jobs and builds.
1761 The output data structure is:
1764 - test (or suite) 1 ID:
1770 - test (or suite) n ID:
1777 :param element: Element which will use the filtered data.
1778 :param params: Parameters which will be included in the output. If None,
1779 all parameters are included.
1780 :param data: If not None, this data is used instead of data specified
1782 :param data_set: The set of data to be filtered: tests, suites,
1784 :param continue_on_error: Continue if there is error while reading the
1785 data. The Item will be empty then
1786 :type element: pandas.Series
1790 :type continue_on_error: bool
1791 :returns: Filtered data.
1792 :rtype pandas.Series
1796 if data_set == "suites":
1798 elif element[u"filter"] in (u"all", u"template"):
1801 cond = InputData._condition(element[u"filter"])
1802 logging.debug(f" Filter: {cond}")
1804 logging.error(u" No filter defined.")
1808 params = element.get(u"parameters", None)
1810 params.append(u"type")
1812 data_to_filter = data if data else element[u"data"]
1815 for job, builds in data_to_filter.items():
1816 data[job] = pd.Series()
1817 for build in builds:
1818 data[job][str(build)] = pd.Series()
1821 self.data[job][str(build)][data_set].items())
1823 if continue_on_error:
1827 for test_id, test_data in data_dict.items():
1828 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1829 data[job][str(build)][test_id] = pd.Series()
1831 for param, val in test_data.items():
1832 data[job][str(build)][test_id][param] = val
1834 for param in params:
1836 data[job][str(build)][test_id][param] =\
1839 data[job][str(build)][test_id][param] =\
1843 except (KeyError, IndexError, ValueError) as err:
1845 f"Missing mandatory parameter in the element specification: "
1849 except AttributeError as err:
1850 logging.error(repr(err))
1852 except SyntaxError as err:
1854 f"The filter {cond} is not correct. Check if all tags are "
1855 f"enclosed by apostrophes.\n{repr(err)}"
1859 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1860 continue_on_error=False):
1861 """Filter required data from the given jobs and builds.
1863 The output data structure is:
1866 - test (or suite) 1 ID:
1872 - test (or suite) n ID:
1879 :param element: Element which will use the filtered data.
1880 :param params: Parameters which will be included in the output. If None,
1881 all parameters are included.
1882 :param data_set: The set of data to be filtered: tests, suites,
1884 :param continue_on_error: Continue if there is error while reading the
1885 data. The Item will be empty then
1886 :type element: pandas.Series
1889 :type continue_on_error: bool
1890 :returns: Filtered data.
1891 :rtype pandas.Series
1894 include = element.get(u"include", None)
1896 logging.warning(u"No tests to include, skipping the element.")
1900 params = element.get(u"parameters", None)
1902 params.append(u"type")
1906 for job, builds in element[u"data"].items():
1907 data[job] = pd.Series()
1908 for build in builds:
1909 data[job][str(build)] = pd.Series()
1910 for test in include:
1912 reg_ex = re.compile(str(test).lower())
1913 for test_id in self.data[job][
1914 str(build)][data_set].keys():
1915 if re.match(reg_ex, str(test_id).lower()):
1916 test_data = self.data[job][
1917 str(build)][data_set][test_id]
1918 data[job][str(build)][test_id] = pd.Series()
1920 for param, val in test_data.items():
1921 data[job][str(build)][test_id]\
1924 for param in params:
1926 data[job][str(build)][
1930 data[job][str(build)][
1931 test_id][param] = u"No Data"
1932 except KeyError as err:
1933 if continue_on_error:
1934 logging.debug(repr(err))
1936 logging.error(repr(err))
1940 except (KeyError, IndexError, ValueError) as err:
1942 f"Missing mandatory parameter in the element "
1943 f"specification: {repr(err)}"
1946 except AttributeError as err:
1947 logging.error(repr(err))
1951 def merge_data(data):
1952 """Merge data from more jobs and builds to a simple data structure.
1954 The output data structure is:
1956 - test (suite) 1 ID:
1962 - test (suite) n ID:
1965 :param data: Data to merge.
1966 :type data: pandas.Series
1967 :returns: Merged data.
1968 :rtype: pandas.Series
1971 logging.info(u" Merging data ...")
1973 merged_data = pd.Series()
1974 for builds in data.values:
1975 for item in builds.values:
1976 for item_id, item_data in item.items():
1977 merged_data[item_id] = item_data
1980 def print_all_oper_data(self):
1981 """Print all operational data to console.
1989 u"Cycles per Packet",
1990 u"Average Vector Size"
1993 for job in self._input_data.values:
1994 for build in job.values:
1995 for test_id, test_data in build[u"tests"].items():
1997 if test_data.get(u"show-run", None) is None:
1999 for dut_name, data in test_data[u"show-run"].items():
2000 if data.get(u"threads", None) is None:
2002 print(f"Host IP: {data.get(u'host', '')}, "
2003 f"Socket: {data.get(u'socket', '')}")
2004 for thread_nr, thread in data[u"threads"].items():
2005 txt_table = prettytable.PrettyTable(tbl_hdr)
2008 txt_table.add_row(row)
2010 if len(thread) == 0:
2013 avg = f", Average Vector Size per Node: " \
2014 f"{(avg / len(thread)):.2f}"
2015 th_name = u"main" if thread_nr == 0 \
2016 else f"worker_{thread_nr}"
2017 print(f"{dut_name}, {th_name}{avg}")
2018 txt_table.float_format = u".2"
2019 txt_table.align = u"r"
2020 txt_table.align[u"Name"] = u"l"
2021 print(f"{txt_table.get_string()}\n")