1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
28 from os import remove, walk, listdir
29 from os.path import isfile, isdir, join
30 from datetime import datetime as dt
31 from datetime import timedelta
32 from json import loads
33 from json.decoder import JSONDecodeError
40 from robot.api import ExecutionResult, ResultVisitor
41 from robot import errors
43 from resources.libraries.python import jumpavg
44 from input_data_files import download_and_unzip_data_file
45 from pal_errors import PresentationError
48 # Separator used in file names
52 class ExecutionChecker(ResultVisitor):
53 """Class to traverse through the test suite structure.
The functionality implemented in this class generates a JSON structure:
61 "generated": "Timestamp",
62 "version": "SUT version",
63 "job": "Jenkins job name",
64 "build": "Information about the build"
67 "Suite long name 1": {
69 "doc": "Suite 1 documentation",
70 "parent": "Suite 1 parent",
71 "level": "Level of the suite in the suite hierarchy"
73 "Suite long name N": {
75 "doc": "Suite N documentation",
76 "parent": "Suite 2 parent",
77 "level": "Level of the suite in the suite hierarchy"
84 "parent": "Name of the parent of the test",
85 "doc": "Test documentation",
86 "msg": "Test message",
87 "conf-history": "DUT1 and DUT2 VAT History",
88 "show-run": "Show Run",
89 "tags": ["tag 1", "tag 2", "tag n"],
91 "status": "PASS" | "FAIL",
137 "parent": "Name of the parent of the test",
138 "doc": "Test documentation",
139 "msg": "Test message",
140 "tags": ["tag 1", "tag 2", "tag n"],
142 "status": "PASS" | "FAIL",
149 "parent": "Name of the parent of the test",
150 "doc": "Test documentation",
151 "msg": "Test message",
152 "tags": ["tag 1", "tag 2", "tag n"],
153 "type": "MRR" | "BMRR",
154 "status": "PASS" | "FAIL",
156 "receive-rate": float,
157 # Average of a list, computed using AvgStdevStats.
158 # In CSIT-1180, replace with List[float].
172 "metadata": { # Optional
173 "version": "VPP version",
174 "job": "Jenkins job name",
175 "build": "Information about the build"
179 "doc": "Suite 1 documentation",
180 "parent": "Suite 1 parent",
181 "level": "Level of the suite in the suite hierarchy"
184 "doc": "Suite N documentation",
185 "parent": "Suite 2 parent",
186 "level": "Level of the suite in the suite hierarchy"
192 "parent": "Name of the parent of the test",
193 "doc": "Test documentation"
194 "msg": "Test message"
195 "tags": ["tag 1", "tag 2", "tag n"],
196 "conf-history": "DUT1 and DUT2 VAT History"
197 "show-run": "Show Run"
198 "status": "PASS" | "FAIL"
206 .. note:: ID is the lowercase full path to the test.
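Example of the generated structure (illustrative only, the field values
below are hypothetical):

{
    "metadata": {
        "generated": "20200601 12:00",
        "version": "20.05-release",
        "job": "example-perf-job",
        "build": "42"
    },
    "suites": {...},
    "tests": {...}
}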
209 REGEX_PLR_RATE = re.compile(
210 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
211 r'PLRsearch upper bound::?\s(\d+.\d+)'
213 REGEX_NDRPDR_RATE = re.compile(
214 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
215 r'NDR_UPPER:\s(\d+.\d+).*\n'
216 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
217 r'PDR_UPPER:\s(\d+.\d+)'
219 REGEX_NDRPDR_GBPS = re.compile(
220 r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
221 r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
222 r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
223 r'PDR_UPPER:.*,\s(\d+.\d+)'
225 REGEX_PERF_MSG_INFO = re.compile(
226 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
227 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
228 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
229 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
230 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
232 REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
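# Illustrative message parsed by REGEX_MRR_MSG_INFO (the numbers are
# hypothetical):
#   Maximum Receive Rate trial results in packets per second:
#   [10000000.0, 10200000.0]
# group(1) then captures "10000000.0, 10200000.0".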
234 # TODO: Remove when not needed
235 REGEX_NDRPDR_LAT_BASE = re.compile(
236 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
237 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
239 REGEX_NDRPDR_LAT = re.compile(
240 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
241 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
242 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
247 # TODO: Remove when not needed
248 REGEX_NDRPDR_LAT_LONG = re.compile(
249 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
250 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
251 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
252 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
253 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
254 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
255 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
256 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
257 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
259 REGEX_VERSION_VPP = re.compile(
260 r"(return STDOUT Version:\s*|"
261 r"VPP Version:\s*|VPP version:\s*)(.*)"
263 REGEX_VERSION_DPDK = re.compile(
264 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
266 REGEX_TCP = re.compile(
267 r'Total\s(rps|cps|throughput):\s(\d*).*$'
269 REGEX_MRR = re.compile(
270 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
271 r'tx\s(\d*),\srx\s(\d*)'
273 REGEX_BMRR = re.compile(
274 r'Maximum Receive Rate trial results'
275 r' in packets per second: \[(.*)\]'
277 REGEX_RECONF_LOSS = re.compile(
278 r'Packets lost due to reconfig: (\d*)'
280 REGEX_RECONF_TIME = re.compile(
281 r'Implied time lost: (\d*.[\de-]*)'
283 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
285 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
287 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
289 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
291 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
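# Illustrative (hypothetical) test case names for the regexes above:
#   REGEX_TC_NUMBER matches "tc01-" in "tc01-64b-1c-ethip4-ip4base-ndrpdr",
#   REGEX_TC_NAME_NEW matches "-1c-" (new style, cores only),
#   REGEX_TC_NAME_OLD matches "-1t1c-" (old style, threads and cores),
#   REGEX_TC_TAG matches tags such as "1T1C".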
293 def __init__(self, metadata, mapping, ignore):
296 :param metadata: Key-value pairs to be included in "metadata" part of
298 :param mapping: Mapping of the old names of test cases to the new
300 :param ignore: List of TCs to be ignored.
306 # Type of message to parse out from the test messages
307 self._msg_type = None
313 self._timestamp = None
315 # Testbed. The testbed is identified by TG node IP address.
318 # Mapping of TCs long names
319 self._mapping = mapping
322 self._ignore = ignore
324 # Number of PAPI History messages found:
326 # 1 - PAPI History of DUT1
327 # 2 - PAPI History of DUT2
328 self._conf_history_lookup_nr = 0
330 self._sh_run_counter = 0
# Test ID of the currently processed test - the lowercase full path to the
336 # The main data structure
338 u"metadata": OrderedDict(),
339 u"suites": OrderedDict(),
340 u"tests": OrderedDict()
343 # Save the provided metadata
344 for key, val in metadata.items():
345 self._data[u"metadata"][key] = val
347 # Dictionary defining the methods used to parse different types of
350 u"timestamp": self._get_timestamp,
351 u"vpp-version": self._get_vpp_version,
352 u"dpdk-version": self._get_dpdk_version,
353 # TODO: Remove when not needed:
354 u"teardown-vat-history": self._get_vat_history,
355 u"teardown-papi-history": self._get_papi_history,
356 u"test-show-runtime": self._get_show_run,
357 u"testbed": self._get_testbed
362 """Getter - Data parsed from the XML file.
364 :returns: Data parsed from the XML file.
369 def _get_data_from_mrr_test_msg(self, msg):
370 """Get info from message of MRR performance tests.
372 :param msg: Message to be processed.
:returns: Processed message, or u"Test Failed." if a problem occurs.
378 groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
379 if not groups or groups.lastindex != 1:
380 return u"Test Failed."
383 data = groups.group(1).split(u", ")
384 except (AttributeError, IndexError, ValueError, KeyError):
385 return u"Test Failed."
390 out_str += f"{(float(item) / 1e6):.2f}, "
391 return out_str[:-2] + u"]"
392 except (AttributeError, IndexError, ValueError, KeyError):
393 return u"Test Failed."
395 def _get_data_from_perf_test_msg(self, msg):
396 """Get info from message of NDRPDR performance tests.
398 :param msg: Message to be processed.
:returns: Processed message, or u"Test Failed." if a problem occurs.
404 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
405 if not groups or groups.lastindex != 10:
406 return u"Test Failed."
410 u"ndr_low": float(groups.group(1)),
411 u"ndr_low_b": float(groups.group(2)),
412 u"pdr_low": float(groups.group(3)),
413 u"pdr_low_b": float(groups.group(4)),
414 u"pdr_lat_90_1": groups.group(5),
415 u"pdr_lat_90_2": groups.group(6),
416 u"pdr_lat_50_1": groups.group(7),
417 u"pdr_lat_50_2": groups.group(8),
418 u"pdr_lat_10_1": groups.group(9),
419 u"pdr_lat_10_2": groups.group(10),
421 except (AttributeError, IndexError, ValueError, KeyError):
422 return u"Test Failed."
424 def _process_lat(in_str_1, in_str_2):
425 """Extract min, avg, max values from latency string.
427 :param in_str_1: Latency string for one direction produced by robot
429 :param in_str_2: Latency string for second direction produced by
433 :returns: Processed latency string or None if a problem occurs.
436 in_list_1 = in_str_1.split('/', 3)
437 in_list_2 = in_str_2.split('/', 3)
# If either latency string is malformed, give up.
if len(in_list_1) != 4 or len(in_list_2) != 4:
# Pad to a multiple of four characters before base64 decoding.
in_list_1[3] += u"=" * ((4 - len(in_list_1[3]) % 4) % 4)
444 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
445 except hdrh.codec.HdrLengthException:
in_list_2[3] += u"=" * ((4 - len(in_list_2[3]) % 4) % 4)
450 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
451 except hdrh.codec.HdrLengthException:
454 if hdr_lat_1 and hdr_lat_2:
456 hdr_lat_1.get_value_at_percentile(50.0),
457 hdr_lat_1.get_value_at_percentile(90.0),
458 hdr_lat_1.get_value_at_percentile(99.0),
459 hdr_lat_2.get_value_at_percentile(50.0),
460 hdr_lat_2.get_value_at_percentile(90.0),
461 hdr_lat_2.get_value_at_percentile(99.0)
471 f"1. {(data[u'ndr_low'] / 1e6):5.2f} "
472 f"{data[u'ndr_low_b']:5.2f}"
473 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f} "
474 f"{data[u'pdr_low_b']:5.2f}"
477 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
478 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
479 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
482 max_len = len(str(max((max(item) for item in latency))))
483 max_len = 4 if max_len < 4 else max_len
485 for idx, lat in enumerate(latency):
490 f"{lat[0]:{max_len}d} "
491 f"{lat[1]:{max_len}d} "
492 f"{lat[2]:{max_len}d} "
493 f"{lat[3]:{max_len}d} "
494 f"{lat[4]:{max_len}d} "
495 f"{lat[5]:{max_len}d} "
500 except (AttributeError, IndexError, ValueError, KeyError):
501 return u"Test Failed."
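# A minimal sketch of the hdrh decoding done in _process_lat above,
# assuming `encoded` holds a valid base64-encoded HDR histogram (the
# variable name is hypothetical):
#
#     hist = hdrh.histogram.HdrHistogram.decode(encoded)
#     p50 = hist.get_value_at_percentile(50.0)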
503 def _get_testbed(self, msg):
504 """Called when extraction of testbed IP is required.
505 The testbed is identified by TG node IP address.
507 :param msg: Message to process.
512 if msg.message.count(u"Setup of TG node") or \
513 msg.message.count(u"Setup of node TG host"):
514 reg_tg_ip = re.compile(
515 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
517 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
518 except (KeyError, ValueError, IndexError, AttributeError):
521 self._data[u"metadata"][u"testbed"] = self._testbed
522 self._msg_type = None
524 def _get_vpp_version(self, msg):
525 """Called when extraction of VPP version is required.
527 :param msg: Message to process.
532 if msg.message.count(u"return STDOUT Version:") or \
533 msg.message.count(u"VPP Version:") or \
534 msg.message.count(u"VPP version:"):
self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
                    group(2))
537 self._data[u"metadata"][u"version"] = self._version
538 self._msg_type = None
540 def _get_dpdk_version(self, msg):
541 """Called when extraction of DPDK version is required.
543 :param msg: Message to process.
548 if msg.message.count(u"DPDK Version:"):
550 self._version = str(re.search(
551 self.REGEX_VERSION_DPDK, msg.message).group(2))
552 self._data[u"metadata"][u"version"] = self._version
556 self._msg_type = None
558 def _get_timestamp(self, msg):
559 """Called when extraction of timestamp is required.
561 :param msg: Message to process.
566 self._timestamp = msg.timestamp[:14]
567 self._data[u"metadata"][u"generated"] = self._timestamp
568 self._msg_type = None
570 def _get_vat_history(self, msg):
571 """Called when extraction of VAT command history is required.
573 TODO: Remove when not needed.
575 :param msg: Message to process.
579 if msg.message.count(u"VAT command history:"):
580 self._conf_history_lookup_nr += 1
581 if self._conf_history_lookup_nr == 1:
582 self._data[u"tests"][self._test_id][u"conf-history"] = str()
584 self._msg_type = None
585 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
586 r"VAT command history:", u"",
587 msg.message, count=1).replace(u'\n', u' |br| ').\
590 self._data[u"tests"][self._test_id][u"conf-history"] += (
591 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
594 def _get_papi_history(self, msg):
595 """Called when extraction of PAPI command history is required.
597 :param msg: Message to process.
601 if msg.message.count(u"PAPI command history:"):
602 self._conf_history_lookup_nr += 1
603 if self._conf_history_lookup_nr == 1:
604 self._data[u"tests"][self._test_id][u"conf-history"] = str()
606 self._msg_type = None
607 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
608 r"PAPI command history:", u"",
609 msg.message, count=1).replace(u'\n', u' |br| ').\
611 self._data[u"tests"][self._test_id][u"conf-history"] += (
612 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
615 def _get_show_run(self, msg):
616 """Called when extraction of VPP operational data (output of CLI command
617 Show Runtime) is required.
619 :param msg: Message to process.
624 if not msg.message.count(u"stats runtime"):
628 if self._sh_run_counter > 1:
631 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
632 self._data[u"tests"][self._test_id][u"show-run"] = dict()
634 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
638 host = groups.group(1)
639 except (AttributeError, IndexError):
642 sock = groups.group(2)
643 except (AttributeError, IndexError):
646 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
647 replace(u"'", u'"').replace(u'b"', u'"').
648 replace(u'u"', u'"').split(u":", 1)[1])
651 threads_nr = len(runtime[0][u"clocks"])
652 except (IndexError, KeyError):
655 dut = u"DUT{nr}".format(
656 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
661 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
665 for idx in range(threads_nr):
666 if item[u"vectors"][idx] > 0:
667 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
668 elif item[u"calls"][idx] > 0:
669 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
670 elif item[u"suspends"][idx] > 0:
671 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
675 if item[u"calls"][idx] > 0:
676 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
680 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
681 int(item[u"suspends"][idx]):
682 oper[u"threads"][idx].append([
685 item[u"vectors"][idx],
686 item[u"suspends"][idx],
691 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
693 def _get_ndrpdr_throughput(self, msg):
694 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
697 :param msg: The test message to be parsed.
699 :returns: Parsed data as a dict and the status (PASS/FAIL).
700 :rtype: tuple(dict, str)
704 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
705 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
708 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
710 if groups is not None:
712 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
713 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
714 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
715 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
717 except (IndexError, ValueError):
720 return throughput, status
722 def _get_ndrpdr_throughput_gbps(self, msg):
723 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
726 :param msg: The test message to be parsed.
728 :returns: Parsed data as a dict and the status (PASS/FAIL).
729 :rtype: tuple(dict, str)
733 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
734 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
737 groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
739 if groups is not None:
741 gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
742 gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
743 gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
744 gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
746 except (IndexError, ValueError):
751 def _get_plr_throughput(self, msg):
752 """Get PLRsearch lower bound and PLRsearch upper bound from the test
755 :param msg: The test message to be parsed.
757 :returns: Parsed data as a dict and the status (PASS/FAIL).
758 :rtype: tuple(dict, str)
766 groups = re.search(self.REGEX_PLR_RATE, msg)
768 if groups is not None:
770 throughput[u"LOWER"] = float(groups.group(1))
771 throughput[u"UPPER"] = float(groups.group(2))
773 except (IndexError, ValueError):
776 return throughput, status
778 def _get_ndrpdr_latency(self, msg):
779 """Get LATENCY from the test message.
781 :param msg: The test message to be parsed.
783 :returns: Parsed data as a dict and the status (PASS/FAIL).
784 :rtype: tuple(dict, str)
794 u"direction1": copy.copy(latency_default),
795 u"direction2": copy.copy(latency_default)
798 u"direction1": copy.copy(latency_default),
799 u"direction2": copy.copy(latency_default)
802 u"direction1": copy.copy(latency_default),
803 u"direction2": copy.copy(latency_default)
806 u"direction1": copy.copy(latency_default),
807 u"direction2": copy.copy(latency_default)
810 u"direction1": copy.copy(latency_default),
811 u"direction2": copy.copy(latency_default)
814 u"direction1": copy.copy(latency_default),
815 u"direction2": copy.copy(latency_default)
819 # TODO: Rewrite when long and base are not needed
820 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
822 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
824 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
826 return latency, u"FAIL"
828 def process_latency(in_str):
829 """Return object with parsed latency values.
831 TODO: Define class for the return type.
833 :param in_str: Input string, min/avg/max/hdrh format.
:returns: Dict with corresponding keys; the values are floats, except
    for u"hdrh" which stays a string.
837 :throws IndexError: If in_str does not have enough substrings.
838 :throws ValueError: If a substring does not convert to float.
840 in_list = in_str.split('/', 3)
843 u"min": float(in_list[0]),
844 u"avg": float(in_list[1]),
845 u"max": float(in_list[2]),
849 if len(in_list) == 4:
850 rval[u"hdrh"] = str(in_list[3])
855 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
856 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
857 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
858 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
859 if groups.lastindex == 4:
860 return latency, u"PASS"
861 except (IndexError, ValueError):
865 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
866 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
867 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
868 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
869 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
870 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
871 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
872 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
873 if groups.lastindex == 12:
874 return latency, u"PASS"
875 except (IndexError, ValueError):
878 # TODO: Remove when not needed
879 latency[u"NDR10"] = {
880 u"direction1": copy.copy(latency_default),
881 u"direction2": copy.copy(latency_default)
883 latency[u"NDR50"] = {
884 u"direction1": copy.copy(latency_default),
885 u"direction2": copy.copy(latency_default)
887 latency[u"NDR90"] = {
888 u"direction1": copy.copy(latency_default),
889 u"direction2": copy.copy(latency_default)
892 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
893 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
894 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
895 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
896 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
897 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
898 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
899 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
900 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
901 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
902 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
903 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
904 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
905 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
906 return latency, u"PASS"
907 except (IndexError, ValueError):
910 return latency, u"FAIL"
913 def _get_hoststack_data(msg, tags):
914 """Get data from the hoststack test message.
916 :param msg: The test message to be parsed.
917 :param tags: Test tags.
920 :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
921 :rtype: tuple(dict, str)
926 msg = msg.replace(u"'", u'"').replace(u" ", u"")
927 if u"LDPRELOAD" in tags:
931 except JSONDecodeError:
933 elif u"VPPECHO" in tags:
935 msg_lst = msg.replace(u"}{", u"} {").split(u" ")
937 client=loads(msg_lst[0]),
938 server=loads(msg_lst[1])
941 except (JSONDecodeError, IndexError):
944 return result, status
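# Illustrative (hypothetical) message shapes handled above:
#   LDPRELOAD: a single JSON object, e.g. '{"bandwidth": 100.0}',
#   VPPECHO: two concatenated JSON objects, e.g.
#   '{"role":"client",...}{"role":"server",...}', split into the client
#   and server parts on the "}{" boundary.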
946 def visit_suite(self, suite):
947 """Implements traversing through the suite and its direct children.
949 :param suite: Suite to process.
953 if self.start_suite(suite) is not False:
954 suite.suites.visit(self)
955 suite.tests.visit(self)
956 self.end_suite(suite)
958 def start_suite(self, suite):
959 """Called when suite starts.
961 :param suite: Suite to process.
967 parent_name = suite.parent.name
968 except AttributeError:
971 doc_str = suite.doc.\
972 replace(u'"', u"'").\
973 replace(u'\n', u' ').\
974 replace(u'\r', u'').\
975 replace(u'*[', u' |br| *[').\
976 replace(u"*", u"**").\
977 replace(u' |br| *[', u'*[', 1)
979 self._data[u"suites"][suite.longname.lower().
981 replace(u" ", u"_")] = {
982 u"name": suite.name.lower(),
984 u"parent": parent_name,
985 u"level": len(suite.longname.split(u"."))
988 suite.keywords.visit(self)
990 def end_suite(self, suite):
991 """Called when suite ends.
993 :param suite: Suite to process.
998 def visit_test(self, test):
999 """Implements traversing through the test.
1001 :param test: Test to process.
1005 if self.start_test(test) is not False:
1006 test.keywords.visit(self)
1009 def start_test(self, test):
1010 """Called when test starts.
1012 :param test: Test to process.
1017 self._sh_run_counter = 0
1019 longname_orig = test.longname.lower()
1021 # Check the ignore list
1022 if longname_orig in self._ignore:
1025 tags = [str(tag) for tag in test.tags]
1026 test_result = dict()
1028 # Change the TC long name and name if defined in the mapping table
1029 longname = self._mapping.get(longname_orig, None)
1030 if longname is not None:
1031 name = longname.split(u'.')[-1]
1033 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1037 longname = longname_orig
1038 name = test.name.lower()
1040 # Remove TC number from the TC long name (backward compatibility):
1041 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1042 # Remove TC number from the TC name (not needed):
1043 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
1045 test_result[u"parent"] = test.parent.name.lower()
1046 test_result[u"tags"] = tags
1047 test_result["doc"] = test.doc.\
1048 replace(u'"', u"'").\
1049 replace(u'\n', u' ').\
1050 replace(u'\r', u'').\
1051 replace(u'[', u' |br| [').\
1052 replace(u' |br| [', u'[', 1)
1053 test_result[u"type"] = u"FUNC"
1054 test_result[u"status"] = test.status
1056 if test.status == u"PASS":
1057 if u"NDRPDR" in tags:
1058 test_result[u"msg"] = self._get_data_from_perf_test_msg(
1059 test.message).replace(u'\n', u' |br| ').\
1060 replace(u'\r', u'').replace(u'"', u"'")
1061 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1062 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1063 test.message).replace(u'\n', u' |br| ').\
1064 replace(u'\r', u'').replace(u'"', u"'")
1066 test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
1067 replace(u'\r', u'').replace(u'"', u"'")
1069 test_result[u"msg"] = u"Test Failed."
1071 if u"PERFTEST" in tags:
1072 # Replace info about cores (e.g. -1c-) with the info about threads
1073 # and cores (e.g. -1t1c-) in the long test case names and in the
1074 # test case names if necessary.
1075 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
1079 for tag in test_result[u"tags"]:
1080 groups = re.search(self.REGEX_TC_TAG, tag)
1086 self._test_id = re.sub(
1087 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1088 self._test_id, count=1
1090 test_result[u"name"] = re.sub(
1091 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1092 test_result["name"], count=1
1095 test_result[u"status"] = u"FAIL"
1096 self._data[u"tests"][self._test_id] = test_result
1098 f"The test {self._test_id} has no or more than one "
1099 f"multi-threading tags.\n"
1100 f"Tags: {test_result[u'tags']}"
1104 if test.status == u"PASS":
1105 if u"NDRPDR" in tags:
1106 test_result[u"type"] = u"NDRPDR"
1107 test_result[u"throughput"], test_result[u"status"] = \
1108 self._get_ndrpdr_throughput(test.message)
1109 test_result[u"gbps"], test_result[u"status"] = \
1110 self._get_ndrpdr_throughput_gbps(test.message)
1111 test_result[u"latency"], test_result[u"status"] = \
1112 self._get_ndrpdr_latency(test.message)
1113 elif u"SOAK" in tags:
1114 test_result[u"type"] = u"SOAK"
1115 test_result[u"throughput"], test_result[u"status"] = \
1116 self._get_plr_throughput(test.message)
1117 elif u"HOSTSTACK" in tags:
1118 test_result[u"type"] = u"HOSTSTACK"
1119 test_result[u"result"], test_result[u"status"] = \
1120 self._get_hoststack_data(test.message, tags)
1121 elif u"TCP" in tags:
1122 test_result[u"type"] = u"TCP"
1123 groups = re.search(self.REGEX_TCP, test.message)
1124 test_result[u"result"] = int(groups.group(2))
1125 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1127 test_result[u"type"] = u"MRR"
1129 test_result[u"type"] = u"BMRR"
1131 test_result[u"result"] = dict()
1132 groups = re.search(self.REGEX_BMRR, test.message)
1133 if groups is not None:
1134 items_str = groups.group(1)
1136 float(item.strip()) for item in items_str.split(",")
1138 # Use whole list in CSIT-1180.
1139 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1140 test_result[u"result"][u"receive-rate"] = stats.avg
1141 test_result[u"result"][u"receive-stdev"] = stats.stdev
1143 groups = re.search(self.REGEX_MRR, test.message)
1144 test_result[u"result"][u"receive-rate"] = \
1145 float(groups.group(3)) / float(groups.group(1))
1146 elif u"RECONF" in tags:
1147 test_result[u"type"] = u"RECONF"
1148 test_result[u"result"] = None
1150 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1151 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1152 test_result[u"result"] = {
1153 u"loss": int(grps_loss.group(1)),
1154 u"time": float(grps_time.group(1))
1156 except (AttributeError, IndexError, ValueError, TypeError):
1157 test_result[u"status"] = u"FAIL"
1158 elif u"DEVICETEST" in tags:
1159 test_result[u"type"] = u"DEVICETEST"
1161 test_result[u"status"] = u"FAIL"
1162 self._data[u"tests"][self._test_id] = test_result
1165 self._data[u"tests"][self._test_id] = test_result
1167 def end_test(self, test):
1168 """Called when test ends.
1170 :param test: Test to process.
1175 def visit_keyword(self, keyword):
1176 """Implements traversing through the keyword and its child keywords.
1178 :param keyword: Keyword to process.
1179 :type keyword: Keyword
1182 if self.start_keyword(keyword) is not False:
1183 self.end_keyword(keyword)
1185 def start_keyword(self, keyword):
1186 """Called when keyword starts. Default implementation does nothing.
1188 :param keyword: Keyword to process.
1189 :type keyword: Keyword
1193 if keyword.type == u"setup":
1194 self.visit_setup_kw(keyword)
1195 elif keyword.type == u"teardown":
1196 self.visit_teardown_kw(keyword)
1198 self.visit_test_kw(keyword)
1199 except AttributeError:
1202 def end_keyword(self, keyword):
1203 """Called when keyword ends. Default implementation does nothing.
1205 :param keyword: Keyword to process.
1206 :type keyword: Keyword
1210 def visit_test_kw(self, test_kw):
1211 """Implements traversing through the test keyword and its child
1214 :param test_kw: Keyword to process.
1215 :type test_kw: Keyword
1218 for keyword in test_kw.keywords:
1219 if self.start_test_kw(keyword) is not False:
1220 self.visit_test_kw(keyword)
1221 self.end_test_kw(keyword)
1223 def start_test_kw(self, test_kw):
1224 """Called when test keyword starts. Default implementation does
1227 :param test_kw: Keyword to process.
1228 :type test_kw: Keyword
1231 if test_kw.name.count(u"Show Runtime On All Duts") or \
1232 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1233 self._msg_type = u"test-show-runtime"
1234 self._sh_run_counter += 1
1237 test_kw.messages.visit(self)
1239 def end_test_kw(self, test_kw):
1240 """Called when keyword ends. Default implementation does nothing.
1242 :param test_kw: Keyword to process.
1243 :type test_kw: Keyword
1247 def visit_setup_kw(self, setup_kw):
1248 """Implements traversing through the teardown keyword and its child
1251 :param setup_kw: Keyword to process.
1252 :type setup_kw: Keyword
1255 for keyword in setup_kw.keywords:
1256 if self.start_setup_kw(keyword) is not False:
1257 self.visit_setup_kw(keyword)
1258 self.end_setup_kw(keyword)
1260 def start_setup_kw(self, setup_kw):
1261 """Called when teardown keyword starts. Default implementation does
1264 :param setup_kw: Keyword to process.
1265 :type setup_kw: Keyword
1268 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1269 and not self._version:
1270 self._msg_type = u"vpp-version"
1271 elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
1273 self._msg_type = u"dpdk-version"
1274 elif setup_kw.name.count(u"Set Global Variable") \
1275 and not self._timestamp:
1276 self._msg_type = u"timestamp"
1277 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1278 self._msg_type = u"testbed"
1281 setup_kw.messages.visit(self)
1283 def end_setup_kw(self, setup_kw):
1284 """Called when keyword ends. Default implementation does nothing.
1286 :param setup_kw: Keyword to process.
1287 :type setup_kw: Keyword
1291 def visit_teardown_kw(self, teardown_kw):
1292 """Implements traversing through the teardown keyword and its child
1295 :param teardown_kw: Keyword to process.
1296 :type teardown_kw: Keyword
1299 for keyword in teardown_kw.keywords:
1300 if self.start_teardown_kw(keyword) is not False:
1301 self.visit_teardown_kw(keyword)
1302 self.end_teardown_kw(keyword)
1304 def start_teardown_kw(self, teardown_kw):
1305 """Called when teardown keyword starts
1307 :param teardown_kw: Keyword to process.
1308 :type teardown_kw: Keyword
1312 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1313 # TODO: Remove when not needed:
1314 self._conf_history_lookup_nr = 0
1315 self._msg_type = u"teardown-vat-history"
1316 teardown_kw.messages.visit(self)
1317 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1318 self._conf_history_lookup_nr = 0
1319 self._msg_type = u"teardown-papi-history"
1320 teardown_kw.messages.visit(self)
1322 def end_teardown_kw(self, teardown_kw):
1323 """Called when keyword ends. Default implementation does nothing.
1325 :param teardown_kw: Keyword to process.
1326 :type teardown_kw: Keyword
1330 def visit_message(self, msg):
1331 """Implements visiting the message.
1333 :param msg: Message to process.
1337 if self.start_message(msg) is not False:
1338 self.end_message(msg)
1340 def start_message(self, msg):
1341 """Called when message starts. Get required information from messages:
1344 :param msg: Message to process.
1349 self.parse_msg[self._msg_type](msg)
1351 def end_message(self, msg):
1352 """Called when message ends. Default implementation does nothing.
1354 :param msg: Message to process.
The data is extracted from output.xml files generated by Jenkins jobs and
stored in pandas' Series.
1370 (as described in ExecutionChecker documentation)
1372 (as described in ExecutionChecker documentation)
1374 (as described in ExecutionChecker documentation)
1377 def __init__(self, spec):
1380 :param spec: Specification.
1381 :type spec: Specification
1388 self._input_data = pd.Series()
1392 """Getter - Input data.
1394 :returns: Input data
1395 :rtype: pandas.Series
1397 return self._input_data
1399 def metadata(self, job, build):
1400 """Getter - metadata
:param job: Job whose metadata we want.
:param build: Build whose metadata we want.
1407 :rtype: pandas.Series
1409 return self.data[job][build][u"metadata"]
1411 def suites(self, job, build):
:param job: Job whose suites we want.
:param build: Build whose suites we want.
1419 :rtype: pandas.Series
1421 return self.data[job][str(build)][u"suites"]
1423 def tests(self, job, build):
:param job: Job whose tests we want.
:param build: Build whose tests we want.
1431 :rtype: pandas.Series
1433 return self.data[job][build][u"tests"]
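# Typical access pattern (the job name and build number below are
# hypothetical):
#   tests = input_data.tests(u"csit-vpp-perf-mrr-daily-master", u"123")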
1435 def _parse_tests(self, job, build):
1436 """Process data from robot output.xml file and return JSON structured
:param job: The name of the job whose build output data will be processed.
:param build: The build whose output data will be processed.
1443 :returns: JSON data structure.
1452 with open(build[u"file-name"], u'r') as data_file:
1454 result = ExecutionResult(data_file)
1455 except errors.DataError as err:
1457 f"Error occurred while parsing output.xml: {repr(err)}"
1460 checker = ExecutionChecker(metadata, self._cfg.mapping,
1462 result.visit(checker)
1466 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1467 """Download and parse the input data file.
:param job: Name of the Jenkins job which generated the processed input
    file.
:param build: Information about the Jenkins build which generated the
    processed input file.
:param repeat: Repeat the download specified number of times if not
    successful.
:param pid: PID of the process executing this method.
1482 logging.info(f" Processing the job/build: {job}: {build[u'build']}")
1489 success = download_and_unzip_data_file(self._cfg, job, build, pid)
1495 f"It is not possible to download the input data file from the "
1496 f"job {job}, build {build[u'build']}, or it is damaged. "
1500 logging.info(f" Processing data from build {build[u'build']}")
1501 data = self._parse_tests(job, build)
1504 f"Input data file from the job {job}, build "
1505 f"{build[u'build']} is damaged. Skipped."
1508 state = u"processed"
1511 remove(build[u"file-name"])
1512 except OSError as err:
1514 f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
1517 # If the time-period is defined in the specification file, remove all
1518 # files which are outside the time period.
1520 timeperiod = self._cfg.input.get(u"time-period", None)
1521 if timeperiod and data:
1523 timeperiod = timedelta(int(timeperiod))
1524 metadata = data.get(u"metadata", None)
1526 generated = metadata.get(u"generated", None)
1528 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1529 if (now - generated) > timeperiod:
1530 # Remove the data and the file:
1535 f" The build {job}/{build[u'build']} is "
1536 f"outdated, will be removed."
1538 logging.info(u" Done.")
1548 def download_and_parse_data(self, repeat=1):
1549 """Download the input data files, parse input data from input files and
1550 store in pandas' Series.
1552 :param repeat: Repeat the download specified number of times if not
1557 logging.info(u"Downloading and parsing input files ...")
1559 for job, builds in self._cfg.builds.items():
1560 for build in builds:
1562 result = self._download_and_parse_build(job, build, repeat)
1565 build_nr = result[u"build"][u"build"]
1568 data = result[u"data"]
1569 build_data = pd.Series({
1570 u"metadata": pd.Series(
1571 list(data[u"metadata"].values()),
1572 index=list(data[u"metadata"].keys())
1574 u"suites": pd.Series(
1575 list(data[u"suites"].values()),
1576 index=list(data[u"suites"].keys())
1578 u"tests": pd.Series(
1579 list(data[u"tests"].values()),
1580 index=list(data[u"tests"].keys())
1584 if self._input_data.get(job, None) is None:
1585 self._input_data[job] = pd.Series()
1586 self._input_data[job][str(build_nr)] = build_data
1588 self._cfg.set_input_file_name(
1589 job, build_nr, result[u"build"][u"file-name"])
1591 self._cfg.set_input_state(job, build_nr, result[u"state"])
1594 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1595 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1597 logging.info(u"Done.")
1599 def process_local_file(self, local_file, job=u"local", build_nr=1,
1601 """Process local XML file given as a command-line parameter.
1603 :param local_file: The file to process.
1604 :param job: Job name.
1605 :param build_nr: Build number.
1606 :param replace: If True, the information about jobs and builds is
1607 replaced by the new one, otherwise the new jobs and builds are
1609 :type local_file: str
1613 :raises: PresentationError if an error occurs.
1615 if not isfile(local_file):
1616 raise PresentationError(f"The file {local_file} does not exist.")
1619 build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
1620 except (IndexError, ValueError):
1625 u"status": u"failed",
1626 u"file-name": local_file
1629 self._cfg.builds = dict()
1630 self._cfg.add_build(job, build)
1632 logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
1633 data = self._parse_tests(job, build)
1635 raise PresentationError(
1636 f"Error occurred while parsing the file {local_file}"
1639 build_data = pd.Series({
1640 u"metadata": pd.Series(
1641 list(data[u"metadata"].values()),
1642 index=list(data[u"metadata"].keys())
1644 u"suites": pd.Series(
1645 list(data[u"suites"].values()),
1646 index=list(data[u"suites"].keys())
1648 u"tests": pd.Series(
1649 list(data[u"tests"].values()),
1650 index=list(data[u"tests"].keys())
1654 if self._input_data.get(job, None) is None:
1655 self._input_data[job] = pd.Series()
1656 self._input_data[job][str(build_nr)] = build_data
1658 self._cfg.set_input_state(job, build_nr, u"processed")
1660 def process_local_directory(self, local_dir, replace=True):
1661 """Process local directory with XML file(s). The directory is processed
1662 as a 'job' and the XML files in it as builds.
1663 If the given directory contains only sub-directories, these
1664 sub-directories processed as jobs and corresponding XML files as builds
1667 :param local_dir: Local directory to process.
1668 :param replace: If True, the information about jobs and builds is
replaced by the new one, otherwise the new jobs and builds are
added to the existing ones.
1671 :type local_dir: str
1674 if not isdir(local_dir):
1675 raise PresentationError(
1676 f"The directory {local_dir} does not exist."
1679 # Check if the given directory includes only files, or only directories
1680 _, dirnames, filenames = next(walk(local_dir))
1682 if filenames and not dirnames:
1685 # key: dir (job) name, value: list of file names (builds)
1687 local_dir: [join(local_dir, name) for name in filenames]
1690 elif dirnames and not filenames:
1693 # key: dir (job) name, value: list of file names (builds)
1694 local_builds = dict()
1695 for dirname in dirnames:
1697 join(local_dir, dirname, name)
1698 for name in listdir(join(local_dir, dirname))
1699 if isfile(join(local_dir, dirname, name))
1702 local_builds[dirname] = sorted(builds)
1704 elif not filenames and not dirnames:
1705 raise PresentationError(f"The directory {local_dir} is empty.")
1707 raise PresentationError(
1708 f"The directory {local_dir} can include only files or only "
1709 f"directories, not both.\nThe directory {local_dir} includes "
1710 f"file(s):\n{filenames}\nand directories:\n{dirnames}"
1714 self._cfg.builds = dict()
1716 for job, files in local_builds.items():
1717 for idx, local_file in enumerate(files):
1718 self.process_local_file(local_file, job, idx + 1, replace=False)
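# Illustrative directory layouts (the paths are hypothetical):
#   flat:   local_dir/1.xml, local_dir/2.xml
#           -> one job named after local_dir with builds 1 and 2,
#   nested: local_dir/job_a/1.xml, local_dir/job_b/1.xml
#           -> jobs "job_a" and "job_b", one build each.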
1721 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1722 """Return the index of character in the string which is the end of tag.
1724 :param tag_filter: The string where the end of tag is being searched.
1725 :param start: The index where the searching is stated.
1726 :param closer: The character which is the tag closer.
1727 :type tag_filter: str
1730 :returns: The index of the tag closer.
1734 idx_opener = tag_filter.index(closer, start)
1735 return tag_filter.index(closer, idx_opener + 1)
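# Example (illustrative): _end_of_tag("'NDRPDR' and '1T1C'") returns 7,
# the index of the apostrophe closing the first tag 'NDRPDR'.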
1740 def _condition(tag_filter):
1741 """Create a conditional statement from the given tag filter.
1743 :param tag_filter: Filter based on tags from the element specification.
1744 :type tag_filter: str
1745 :returns: Conditional statement which can be evaluated.
1750 index = InputData._end_of_tag(tag_filter, index)
1754 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
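# E.g. the (illustrative) filter "'NDRPDR' and not '2T1C'" becomes the
# evaluable condition "'NDRPDR' in tags and not '2T1C' in tags".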
1756 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1757 continue_on_error=False):
1758 """Filter required data from the given jobs and builds.
1760 The output data structure is:
1763 - test (or suite) 1 ID:
1769 - test (or suite) n ID:
1776 :param element: Element which will use the filtered data.
1777 :param params: Parameters which will be included in the output. If None,
1778 all parameters are included.
1779 :param data: If not None, this data is used instead of data specified
1781 :param data_set: The set of data to be filtered: tests, suites,
:param continue_on_error: Continue if there is an error while reading the
    data. The item will be empty then.
1785 :type element: pandas.Series
1789 :type continue_on_error: bool
1790 :returns: Filtered data.
:rtype: pandas.Series
1795 if data_set == "suites":
1797 elif element[u"filter"] in (u"all", u"template"):
1800 cond = InputData._condition(element[u"filter"])
1801 logging.debug(f" Filter: {cond}")
1803 logging.error(u" No filter defined.")
1807 params = element.get(u"parameters", None)
1809 params.append(u"type")
1811 data_to_filter = data if data else element[u"data"]
1814 for job, builds in data_to_filter.items():
1815 data[job] = pd.Series()
1816 for build in builds:
1817 data[job][str(build)] = pd.Series()
1820 self.data[job][str(build)][data_set].items())
1822 if continue_on_error:
1826 for test_id, test_data in data_dict.items():
1827 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1828 data[job][str(build)][test_id] = pd.Series()
1830 for param, val in test_data.items():
1831 data[job][str(build)][test_id][param] = val
1833 for param in params:
1835 data[job][str(build)][test_id][param] =\
1838 data[job][str(build)][test_id][param] =\
1842 except (KeyError, IndexError, ValueError) as err:
1844 f"Missing mandatory parameter in the element specification: "
1848 except AttributeError as err:
1849 logging.error(repr(err))
1851 except SyntaxError as err:
1853 f"The filter {cond} is not correct. Check if all tags are "
1854 f"enclosed by apostrophes.\n{repr(err)}"
1858 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1859 continue_on_error=False):
1860 """Filter required data from the given jobs and builds.
1862 The output data structure is:
1865 - test (or suite) 1 ID:
1871 - test (or suite) n ID:
1878 :param element: Element which will use the filtered data.
1879 :param params: Parameters which will be included in the output. If None,
1880 all parameters are included.
1881 :param data_set: The set of data to be filtered: tests, suites,
:param continue_on_error: Continue if there is an error while reading the
    data. The item will be empty then.
1885 :type element: pandas.Series
1888 :type continue_on_error: bool
1889 :returns: Filtered data.
:rtype: pandas.Series
1893 include = element.get(u"include", None)
1895 logging.warning(u"No tests to include, skipping the element.")
1899 params = element.get(u"parameters", None)
1901 params.append(u"type")
1905 for job, builds in element[u"data"].items():
1906 data[job] = pd.Series()
1907 for build in builds:
1908 data[job][str(build)] = pd.Series()
1909 for test in include:
1911 reg_ex = re.compile(str(test).lower())
1912 for test_id in self.data[job][
1913 str(build)][data_set].keys():
1914 if re.match(reg_ex, str(test_id).lower()):
1915 test_data = self.data[job][
1916 str(build)][data_set][test_id]
1917 data[job][str(build)][test_id] = pd.Series()
1919 for param, val in test_data.items():
1920 data[job][str(build)][test_id]\
1923 for param in params:
1925 data[job][str(build)][
1929 data[job][str(build)][
1930 test_id][param] = u"No Data"
1931 except KeyError as err:
1932 if continue_on_error:
1933 logging.debug(repr(err))
1935 logging.error(repr(err))
1939 except (KeyError, IndexError, ValueError) as err:
1941 f"Missing mandatory parameter in the element "
1942 f"specification: {repr(err)}"
1945 except AttributeError as err:
1946 logging.error(repr(err))
1950 def merge_data(data):
1951 """Merge data from more jobs and builds to a simple data structure.
1953 The output data structure is:
1955 - test (suite) 1 ID:
1961 - test (suite) n ID:
1964 :param data: Data to merge.
1965 :type data: pandas.Series
1966 :returns: Merged data.
1967 :rtype: pandas.Series
1970 logging.info(u" Merging data ...")
1972 merged_data = pd.Series()
1973 for builds in data.values:
1974 for item in builds.values:
1975 for item_id, item_data in item.items():
1976 merged_data[item_id] = item_data
1979 def print_all_oper_data(self):
1980 """Print all operational data to console.
1988 u"Cycles per Packet",
1989 u"Average Vector Size"
1992 for job in self._input_data.values:
1993 for build in job.values:
1994 for test_id, test_data in build[u"tests"].items():
1996 if test_data.get(u"show-run", None) is None:
1998 for dut_name, data in test_data[u"show-run"].items():
1999 if data.get(u"threads", None) is None:
2001 print(f"Host IP: {data.get(u'host', '')}, "
2002 f"Socket: {data.get(u'socket', '')}")
2003 for thread_nr, thread in data[u"threads"].items():
2004 txt_table = prettytable.PrettyTable(tbl_hdr)
2007 txt_table.add_row(row)
2009 if len(thread) == 0:
2012 avg = f", Average Vector Size per Node: " \
2013 f"{(avg / len(thread)):.2f}"
2014 th_name = u"main" if thread_nr == 0 \
2015 else f"worker_{thread_nr}"
2016 print(f"{dut_name}, {th_name}{avg}")
2017 txt_table.float_format = u".2"
2018 txt_table.align = u"r"
2019 txt_table.align[u"Name"] = u"l"
2020 print(f"{txt_table.get_string()}\n")