1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
36 from robot.api import ExecutionResult, ResultVisitor
37 from robot import errors
39 from resources.libraries.python import jumpavg
40 from input_data_files import download_and_unzip_data_file
43 # Separator used in file names
47 class ExecutionChecker(ResultVisitor):
48 """Class to traverse through the test suite structure.
50 The functionality implemented in this class generates a json structure:
56 "generated": "Timestamp",
57 "version": "SUT version",
58 "job": "Jenkins job name",
59 "build": "Information about the build"
62 "Suite long name 1": {
64 "doc": "Suite 1 documentation",
65 "parent": "Suite 1 parent",
66 "level": "Level of the suite in the suite hierarchy"
68 "Suite long name N": {
70 "doc": "Suite N documentation",
71 "parent": "Suite 2 parent",
72 "level": "Level of the suite in the suite hierarchy"
79 "parent": "Name of the parent of the test",
80 "doc": "Test documentation",
81 "msg": "Test message",
82 "conf-history": "DUT1 and DUT2 VAT History",
83 "show-run": "Show Run",
84 "tags": ["tag 1", "tag 2", "tag n"],
86 "status": "PASS" | "FAIL",
132 "parent": "Name of the parent of the test",
133 "doc": "Test documentation",
134 "msg": "Test message",
135 "tags": ["tag 1", "tag 2", "tag n"],
137 "status": "PASS" | "FAIL",
144 "parent": "Name of the parent of the test",
145 "doc": "Test documentation",
146 "msg": "Test message",
147 "tags": ["tag 1", "tag 2", "tag n"],
148 "type": "MRR" | "BMRR",
149 "status": "PASS" | "FAIL",
151 "receive-rate": float,
152 # Average of a list, computed using AvgStdevStats.
153 # In CSIT-1180, replace with List[float].
167 "metadata": { # Optional
168 "version": "VPP version",
169 "job": "Jenkins job name",
170 "build": "Information about the build"
174 "doc": "Suite 1 documentation",
175 "parent": "Suite 1 parent",
176 "level": "Level of the suite in the suite hierarchy"
179 "doc": "Suite N documentation",
180 "parent": "Suite 2 parent",
181 "level": "Level of the suite in the suite hierarchy"
187 "parent": "Name of the parent of the test",
188 "doc": "Test documentation"
189 "msg": "Test message"
190 "tags": ["tag 1", "tag 2", "tag n"],
191 "conf-history": "DUT1 and DUT2 VAT History"
192 "show-run": "Show Run"
193 "status": "PASS" | "FAIL"
201 .. note:: ID is the lowercase full path to the test.
204 REGEX_PLR_RATE = re.compile(
205 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
206 r'PLRsearch upper bound::?\s(\d+.\d+)'
208 REGEX_NDRPDR_RATE = re.compile(
209 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
210 r'NDR_UPPER:\s(\d+.\d+).*\n'
211 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
212 r'PDR_UPPER:\s(\d+.\d+)'
214 REGEX_PERF_MSG_INFO = re.compile(
215 r'NDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
216 r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
217 r'NDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
218 r'PDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
219 r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
220 r'PDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*)'
222 # TODO: Remove when not needed
223 REGEX_NDRPDR_LAT_BASE = re.compile(
224 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
225 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
227 REGEX_NDRPDR_LAT = re.compile(
228 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
229 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
230 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
231 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
232 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
233 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
235 # TODO: Remove when not needed
236 REGEX_NDRPDR_LAT_LONG = re.compile(
237 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
238 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
239 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
240 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
241 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
242 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
247 REGEX_VERSION_VPP = re.compile(
248 r"(return STDOUT Version:\s*|"
249 r"VPP Version:\s*|VPP version:\s*)(.*)"
251 REGEX_VERSION_DPDK = re.compile(
252 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
254 REGEX_TCP = re.compile(
255 r'Total\s(rps|cps|throughput):\s(\d*).*$'
257 REGEX_MRR = re.compile(
258 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
259 r'tx\s(\d*),\srx\s(\d*)'
261 REGEX_BMRR = re.compile(
262 r'Maximum Receive Rate trial results'
263 r' in packets per second: \[(.*)\]'
265 REGEX_RECONF_LOSS = re.compile(
266 r'Packets lost due to reconfig: (\d*)'
268 REGEX_RECONF_TIME = re.compile(
269 r'Implied time lost: (\d*.[\de-]*)'
271 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
273 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
275 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
277 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
279 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
281 def __init__(self, metadata, mapping, ignore):
284 :param metadata: Key-value pairs to be included in "metadata" part of
286 :param mapping: Mapping of the old names of test cases to the new
288 :param ignore: List of TCs to be ignored.
294 # Type of message to parse out from the test messages
295 self._msg_type = None
301 self._timestamp = None
303 # Testbed. The testbed is identified by TG node IP address.
306 # Mapping of TCs long names
307 self._mapping = mapping
310 self._ignore = ignore
312 # Number of PAPI History messages found:
314 # 1 - PAPI History of DUT1
315 # 2 - PAPI History of DUT2
316 self._conf_history_lookup_nr = 0
318 # Test ID of currently processed test- the lowercase full path to the
322 # The main data structure
324 u"metadata": OrderedDict(),
325 u"suites": OrderedDict(),
326 u"tests": OrderedDict()
329 # Save the provided metadata
330 for key, val in metadata.items():
331 self._data[u"metadata"][key] = val
333 # Dictionary defining the methods used to parse different types of
336 u"timestamp": self._get_timestamp,
337 u"vpp-version": self._get_vpp_version,
338 u"dpdk-version": self._get_dpdk_version,
339 # TODO: Remove when not needed:
340 u"teardown-vat-history": self._get_vat_history,
341 u"teardown-papi-history": self._get_papi_history,
342 u"test-show-runtime": self._get_show_run,
343 u"testbed": self._get_testbed
348 """Getter - Data parsed from the XML file.
350 :returns: Data parsed from the XML file.
355 def _get_data_from_perf_test_msg(self, msg):
363 from message of NDRPDR performance tests.
365 :param msg: Message to be processed.
367 :returns: Processed message or original message if a problem occurs.
371 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
372 if not groups or groups.lastindex != 20:
377 u"ndr_low": float(groups.group(1)),
378 u"ndr_low_unit": groups.group(2),
379 u"ndr_low_b": float(groups.group(3)),
380 u"ndr_low_b_unit": groups.group(4),
381 u"ndr_lat_1": groups.group(5),
382 u"ndr_lat_2": groups.group(6),
383 u"ndr_up": float(groups.group(7)),
384 u"ndr_up_unit": groups.group(8),
385 u"ndr_up_b": float(groups.group(9)),
386 u"ndr_up_b_unit": groups.group(10),
387 u"pdr_low": float(groups.group(11)),
388 u"pdr_low_unit": groups.group(12),
389 u"pdr_low_b": float(groups.group(13)),
390 u"pdr_low_b_unit": groups.group(14),
391 u"pdr_lat_1": groups.group(15),
392 u"pdr_lat_2": groups.group(16),
393 u"pdr_up": float(groups.group(17)),
394 u"pdr_up_unit": groups.group(18),
395 u"pdr_up_b": float(groups.group(19)),
396 u"pdr_up_b_unit": groups.group(20)
398 except (AttributeError, IndexError, ValueError, KeyError):
401 def _process_lat(in_str):
402 """Extract min, avg, max values from latency string.
404 :param in_str: Latency string produced by robot framework.
406 :returns: Processed latency string or original string if a problem
410 in_list = in_str.split('/', 3)
414 return f"min={in_list[0]}, avg={in_list[1]}, max={in_list[2]}"
418 f"NDR Lower: {(data[u'ndr_low'] / 1e6):.2f}"
419 f"M{data[u'ndr_low_unit']}, "
420 f"{data[u'ndr_low_b']:.2f}{data[u'ndr_low_b_unit']}\n"
421 # f"NDR Upper: {(data[u'ndr_up'] / 1e6):.2f}"
422 # f"M{data[u'ndr_up_unit']}, "
423 # f"{data[u'ndr_up_b']:.2f}{data[u'ndr_up_b_unit']}\n"
424 f"NDR Latency W-E: {_process_lat(data[u'ndr_lat_1'])}\n"
425 f"NDR Latency E-W: {_process_lat(data[u'ndr_lat_2'])}\n"
426 f"PDR Lower: {(data[u'pdr_low'] / 1e6):.2f}"
427 f"M{data[u'pdr_low_unit']}, "
428 f"{data[u'pdr_low_b']:.2f}{data[u'pdr_low_b_unit']}\n"
429 # f"PDR Upper: {(data[u'pdr_up'] / 1e6):.2f}"
430 # f"M{data[u'pdr_up_unit']}, "
431 # f"{data[u'pdr_up_b']:.2f}{data[u'pdr_up_b_unit']}\n"
432 f"PDR Latency W-E: {_process_lat(data[u'pdr_lat_1'])}\n"
433 f"PDR Latency E-W: {_process_lat(data[u'pdr_lat_2'])}"
435 except (AttributeError, IndexError, ValueError, KeyError):
438 def _get_testbed(self, msg):
439 """Called when extraction of testbed IP is required.
440 The testbed is identified by TG node IP address.
442 :param msg: Message to process.
447 if msg.message.count(u"Setup of TG node") or \
448 msg.message.count(u"Setup of node TG host"):
449 reg_tg_ip = re.compile(
450 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
452 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
453 except (KeyError, ValueError, IndexError, AttributeError):
456 self._data[u"metadata"][u"testbed"] = self._testbed
457 self._msg_type = None
459 def _get_vpp_version(self, msg):
460 """Called when extraction of VPP version is required.
462 :param msg: Message to process.
467 if msg.message.count(u"return STDOUT Version:") or \
468 msg.message.count(u"VPP Version:") or \
469 msg.message.count(u"VPP version:"):
470 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
472 self._data[u"metadata"][u"version"] = self._version
473 self._msg_type = None
475 def _get_dpdk_version(self, msg):
476 """Called when extraction of DPDK version is required.
478 :param msg: Message to process.
483 if msg.message.count(u"DPDK Version:"):
485 self._version = str(re.search(
486 self.REGEX_VERSION_DPDK, msg.message).group(2))
487 self._data[u"metadata"][u"version"] = self._version
491 self._msg_type = None
493 def _get_timestamp(self, msg):
494 """Called when extraction of timestamp is required.
496 :param msg: Message to process.
501 self._timestamp = msg.timestamp[:14]
502 self._data[u"metadata"][u"generated"] = self._timestamp
503 self._msg_type = None
505 def _get_vat_history(self, msg):
506 """Called when extraction of VAT command history is required.
508 TODO: Remove when not needed.
510 :param msg: Message to process.
514 if msg.message.count(u"VAT command history:"):
515 self._conf_history_lookup_nr += 1
516 if self._conf_history_lookup_nr == 1:
517 self._data[u"tests"][self._test_id][u"conf-history"] = str()
519 self._msg_type = None
520 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
521 r"VAT command history:", u"",
522 msg.message, count=1).replace(u'\n', u' |br| ').\
525 self._data[u"tests"][self._test_id][u"conf-history"] += (
526 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
529 def _get_papi_history(self, msg):
530 """Called when extraction of PAPI command history is required.
532 :param msg: Message to process.
536 if msg.message.count(u"PAPI command history:"):
537 self._conf_history_lookup_nr += 1
538 if self._conf_history_lookup_nr == 1:
539 self._data[u"tests"][self._test_id][u"conf-history"] = str()
541 self._msg_type = None
542 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
543 r"PAPI command history:", u"",
544 msg.message, count=1).replace(u'\n', u' |br| ').\
546 self._data[u"tests"][self._test_id][u"conf-history"] += (
547 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
550 def _get_show_run(self, msg):
551 """Called when extraction of VPP operational data (output of CLI command
552 Show Runtime) is required.
554 :param msg: Message to process.
559 if not msg.message.count(u"stats runtime"):
562 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
563 self._data[u"tests"][self._test_id][u"show-run"] = dict()
565 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
569 host = groups.group(1)
570 except (AttributeError, IndexError):
573 sock = groups.group(2)
574 except (AttributeError, IndexError):
577 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
578 replace(u"'", u'"').replace(u'b"', u'"').
579 replace(u'u"', u'"').split(u":", 1)[1])
582 threads_nr = len(runtime[0][u"clocks"])
583 except (IndexError, KeyError):
586 dut = u"DUT{nr}".format(
587 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
592 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
596 for idx in range(threads_nr):
597 if item[u"vectors"][idx] > 0:
598 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
599 elif item[u"calls"][idx] > 0:
600 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
601 elif item[u"suspends"][idx] > 0:
602 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
606 if item[u"calls"][idx] > 0:
607 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
611 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
612 int(item[u"suspends"][idx]):
613 oper[u"threads"][idx].append([
616 item[u"vectors"][idx],
617 item[u"suspends"][idx],
622 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
624 def _get_ndrpdr_throughput(self, msg):
625 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
628 :param msg: The test message to be parsed.
630 :returns: Parsed data as a dict and the status (PASS/FAIL).
631 :rtype: tuple(dict, str)
635 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
636 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
639 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
641 if groups is not None:
643 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
644 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
645 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
646 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
648 except (IndexError, ValueError):
651 return throughput, status
653 def _get_plr_throughput(self, msg):
654 """Get PLRsearch lower bound and PLRsearch upper bound from the test
657 :param msg: The test message to be parsed.
659 :returns: Parsed data as a dict and the status (PASS/FAIL).
660 :rtype: tuple(dict, str)
668 groups = re.search(self.REGEX_PLR_RATE, msg)
670 if groups is not None:
672 throughput[u"LOWER"] = float(groups.group(1))
673 throughput[u"UPPER"] = float(groups.group(2))
675 except (IndexError, ValueError):
678 return throughput, status
680 def _get_ndrpdr_latency(self, msg):
681 """Get LATENCY from the test message.
683 :param msg: The test message to be parsed.
685 :returns: Parsed data as a dict and the status (PASS/FAIL).
686 :rtype: tuple(dict, str)
696 u"direction1": copy.copy(latency_default),
697 u"direction2": copy.copy(latency_default)
700 u"direction1": copy.copy(latency_default),
701 u"direction2": copy.copy(latency_default)
704 u"direction1": copy.copy(latency_default),
705 u"direction2": copy.copy(latency_default)
708 u"direction1": copy.copy(latency_default),
709 u"direction2": copy.copy(latency_default)
712 u"direction1": copy.copy(latency_default),
713 u"direction2": copy.copy(latency_default)
716 u"direction1": copy.copy(latency_default),
717 u"direction2": copy.copy(latency_default)
721 # TODO: Rewrite when long and base are not needed
722 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
724 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
726 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
728 return latency, u"FAIL"
def process_latency(in_str):
    """Return object with parsed latency values.

    TODO: Define class for the return type.

    :param in_str: Input string, min/avg/max/hdrh format.
    :type in_str: str
    :returns: Dict with corresponding keys, except hdrh float values.
    :rtype: dict
    :throws IndexError: If in_str does not have enough substrings.
    :throws ValueError: If a substring does not convert to float.
    """
    # Split at most 3 times: the optional 4th field is an opaque hdrh
    # blob that may itself contain '/' characters.
    in_list = in_str.split('/', 3)

    rval = {
        u"min": float(in_list[0]),
        u"avg": float(in_list[1]),
        u"max": float(in_list[2]),
        u"hdrh": u""
    }

    if len(in_list) == 4:
        rval[u"hdrh"] = str(in_list[3])

    return rval
757 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
758 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
759 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
760 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
761 if groups.lastindex == 4:
762 return latency, u"PASS"
763 except (IndexError, ValueError):
767 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
768 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
769 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
770 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
771 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
772 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
773 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
774 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
775 if groups.lastindex == 12:
776 return latency, u"PASS"
777 except (IndexError, ValueError):
780 # TODO: Remove when not needed
781 latency[u"NDR10"] = {
782 u"direction1": copy.copy(latency_default),
783 u"direction2": copy.copy(latency_default)
785 latency[u"NDR50"] = {
786 u"direction1": copy.copy(latency_default),
787 u"direction2": copy.copy(latency_default)
789 latency[u"NDR90"] = {
790 u"direction1": copy.copy(latency_default),
791 u"direction2": copy.copy(latency_default)
794 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
795 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
796 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
797 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
798 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
799 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
800 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
801 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
802 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
803 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
804 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
805 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
806 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
807 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
808 return latency, u"PASS"
809 except (IndexError, ValueError):
812 return latency, u"FAIL"
def visit_suite(self, suite):
    """Implements traversing through the suite and its direct children.

    :param suite: Suite to process.
    :type suite: Suite
    :returns: Nothing.
    """
    # start_suite may veto traversal by returning False explicitly;
    # None (the default) means "continue".
    if self.start_suite(suite) is not False:
        suite.suites.visit(self)
        suite.tests.visit(self)
        self.end_suite(suite)
826 def start_suite(self, suite):
827 """Called when suite starts.
829 :param suite: Suite to process.
835 parent_name = suite.parent.name
836 except AttributeError:
839 doc_str = suite.doc.\
840 replace(u'"', u"'").\
841 replace(u'\n', u' ').\
842 replace(u'\r', u'').\
843 replace(u'*[', u' |br| *[').\
844 replace(u"*", u"**").\
845 replace(u' |br| *[', u'*[', 1)
847 self._data[u"suites"][suite.longname.lower().
849 replace(u" ", u"_")] = {
850 u"name": suite.name.lower(),
852 u"parent": parent_name,
853 u"level": len(suite.longname.split(u"."))
856 suite.keywords.visit(self)
858 def end_suite(self, suite):
859 """Called when suite ends.
861 :param suite: Suite to process.
def visit_test(self, test):
    """Implements traversing through the test.

    :param test: Test to process.
    :type test: Test
    :returns: Nothing.
    """
    # Mirrors visit_suite: a literal False from start_test skips the
    # test entirely; anything else (incl. None) continues traversal.
    if self.start_test(test) is not False:
        test.keywords.visit(self)
        self.end_test(test)
877 def start_test(self, test):
878 """Called when test starts.
880 :param test: Test to process.
885 longname_orig = test.longname.lower()
887 # Check the ignore list
888 if longname_orig in self._ignore:
891 tags = [str(tag) for tag in test.tags]
894 # Change the TC long name and name if defined in the mapping table
895 longname = self._mapping.get(longname_orig, None)
896 if longname is not None:
897 name = longname.split(u'.')[-1]
899 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
903 longname = longname_orig
904 name = test.name.lower()
906 # Remove TC number from the TC long name (backward compatibility):
907 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
908 # Remove TC number from the TC name (not needed):
909 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
911 test_result[u"parent"] = test.parent.name.lower()
912 test_result[u"tags"] = tags
913 test_result["doc"] = test.doc.\
914 replace(u'"', u"'").\
915 replace(u'\n', u' ').\
916 replace(u'\r', u'').\
917 replace(u'[', u' |br| [').\
918 replace(u' |br| [', u'[', 1)
919 test_result[u"msg"] = self._get_data_from_perf_test_msg(test.message).\
920 replace(u'\n', u' |br| ').\
921 replace(u'\r', u'').\
923 test_result[u"type"] = u"FUNC"
924 test_result[u"status"] = test.status
926 if u"PERFTEST" in tags:
927 # Replace info about cores (e.g. -1c-) with the info about threads
928 # and cores (e.g. -1t1c-) in the long test case names and in the
929 # test case names if necessary.
930 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
934 for tag in test_result[u"tags"]:
935 groups = re.search(self.REGEX_TC_TAG, tag)
941 self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
942 f"-{tag_tc.lower()}-",
945 test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
946 f"-{tag_tc.lower()}-",
950 test_result[u"status"] = u"FAIL"
951 self._data[u"tests"][self._test_id] = test_result
953 f"The test {self._test_id} has no or more than one "
954 f"multi-threading tags.\n"
955 f"Tags: {test_result[u'tags']}"
959 if test.status == u"PASS":
960 if u"NDRPDR" in tags:
961 test_result[u"type"] = u"NDRPDR"
962 test_result[u"throughput"], test_result[u"status"] = \
963 self._get_ndrpdr_throughput(test.message)
964 test_result[u"latency"], test_result[u"status"] = \
965 self._get_ndrpdr_latency(test.message)
966 elif u"SOAK" in tags:
967 test_result[u"type"] = u"SOAK"
968 test_result[u"throughput"], test_result[u"status"] = \
969 self._get_plr_throughput(test.message)
971 test_result[u"type"] = u"TCP"
972 groups = re.search(self.REGEX_TCP, test.message)
973 test_result[u"result"] = int(groups.group(2))
974 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
976 test_result[u"type"] = u"MRR"
978 test_result[u"type"] = u"BMRR"
980 test_result[u"result"] = dict()
981 groups = re.search(self.REGEX_BMRR, test.message)
982 if groups is not None:
983 items_str = groups.group(1)
984 items_float = [float(item.strip()) for item
985 in items_str.split(",")]
986 # Use whole list in CSIT-1180.
987 stats = jumpavg.AvgStdevStats.for_runs(items_float)
988 test_result[u"result"][u"receive-rate"] = stats.avg
990 groups = re.search(self.REGEX_MRR, test.message)
991 test_result[u"result"][u"receive-rate"] = \
992 float(groups.group(3)) / float(groups.group(1))
993 elif u"RECONF" in tags:
994 test_result[u"type"] = u"RECONF"
995 test_result[u"result"] = None
997 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
998 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
999 test_result[u"result"] = {
1000 u"loss": int(grps_loss.group(1)),
1001 u"time": float(grps_time.group(1))
1003 except (AttributeError, IndexError, ValueError, TypeError):
1004 test_result[u"status"] = u"FAIL"
1006 test_result[u"status"] = u"FAIL"
1007 self._data[u"tests"][self._test_id] = test_result
1010 self._data[u"tests"][self._test_id] = test_result
1012 def end_test(self, test):
1013 """Called when test ends.
1015 :param test: Test to process.
def visit_keyword(self, keyword):
    """Implements traversing through the keyword and its child keywords.

    :param keyword: Keyword to process.
    :type keyword: Keyword
    :returns: Nothing.
    """
    # Child traversal is delegated by start_keyword (it dispatches to the
    # setup/teardown/test visitors), so only start/end are invoked here.
    if self.start_keyword(keyword) is not False:
        self.end_keyword(keyword)
1030 def start_keyword(self, keyword):
1031 """Called when keyword starts. Default implementation does nothing.
1033 :param keyword: Keyword to process.
1034 :type keyword: Keyword
1038 if keyword.type == u"setup":
1039 self.visit_setup_kw(keyword)
1040 elif keyword.type == u"teardown":
1041 self.visit_teardown_kw(keyword)
1043 self.visit_test_kw(keyword)
1044 except AttributeError:
1047 def end_keyword(self, keyword):
1048 """Called when keyword ends. Default implementation does nothing.
1050 :param keyword: Keyword to process.
1051 :type keyword: Keyword
1055 def visit_test_kw(self, test_kw):
1056 """Implements traversing through the test keyword and its child
1059 :param test_kw: Keyword to process.
1060 :type test_kw: Keyword
1063 for keyword in test_kw.keywords:
1064 if self.start_test_kw(keyword) is not False:
1065 self.visit_test_kw(keyword)
1066 self.end_test_kw(keyword)
1068 def start_test_kw(self, test_kw):
1069 """Called when test keyword starts. Default implementation does
1072 :param test_kw: Keyword to process.
1073 :type test_kw: Keyword
1076 if test_kw.name.count(u"Show Runtime On All Duts") or \
1077 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1078 self._msg_type = u"test-show-runtime"
1079 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1080 self._msg_type = u"dpdk-version"
1083 test_kw.messages.visit(self)
1085 def end_test_kw(self, test_kw):
1086 """Called when keyword ends. Default implementation does nothing.
1088 :param test_kw: Keyword to process.
1089 :type test_kw: Keyword
1093 def visit_setup_kw(self, setup_kw):
1094 """Implements traversing through the teardown keyword and its child
1097 :param setup_kw: Keyword to process.
1098 :type setup_kw: Keyword
1101 for keyword in setup_kw.keywords:
1102 if self.start_setup_kw(keyword) is not False:
1103 self.visit_setup_kw(keyword)
1104 self.end_setup_kw(keyword)
1106 def start_setup_kw(self, setup_kw):
1107 """Called when teardown keyword starts. Default implementation does
1110 :param setup_kw: Keyword to process.
1111 :type setup_kw: Keyword
1114 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1115 and not self._version:
1116 self._msg_type = u"vpp-version"
1117 elif setup_kw.name.count(u"Set Global Variable") \
1118 and not self._timestamp:
1119 self._msg_type = u"timestamp"
1120 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1121 self._msg_type = u"testbed"
1124 setup_kw.messages.visit(self)
1126 def end_setup_kw(self, setup_kw):
1127 """Called when keyword ends. Default implementation does nothing.
1129 :param setup_kw: Keyword to process.
1130 :type setup_kw: Keyword
1134 def visit_teardown_kw(self, teardown_kw):
1135 """Implements traversing through the teardown keyword and its child
1138 :param teardown_kw: Keyword to process.
1139 :type teardown_kw: Keyword
1142 for keyword in teardown_kw.keywords:
1143 if self.start_teardown_kw(keyword) is not False:
1144 self.visit_teardown_kw(keyword)
1145 self.end_teardown_kw(keyword)
1147 def start_teardown_kw(self, teardown_kw):
1148 """Called when teardown keyword starts
1150 :param teardown_kw: Keyword to process.
1151 :type teardown_kw: Keyword
1155 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1156 # TODO: Remove when not needed:
1157 self._conf_history_lookup_nr = 0
1158 self._msg_type = u"teardown-vat-history"
1159 teardown_kw.messages.visit(self)
1160 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1161 self._conf_history_lookup_nr = 0
1162 self._msg_type = u"teardown-papi-history"
1163 teardown_kw.messages.visit(self)
1165 def end_teardown_kw(self, teardown_kw):
1166 """Called when keyword ends. Default implementation does nothing.
1168 :param teardown_kw: Keyword to process.
1169 :type teardown_kw: Keyword
def visit_message(self, msg):
    """Implements visiting the message.

    :param msg: Message to process.
    :type msg: Message
    :returns: Nothing.
    """
    # start_message may veto by returning False; None continues.
    if self.start_message(msg) is not False:
        self.end_message(msg)
1183 def start_message(self, msg):
1184 """Called when message starts. Get required information from messages:
1187 :param msg: Message to process.
1193 self.parse_msg[self._msg_type](msg)
1195 def end_message(self, msg):
1196 """Called when message ends. Default implementation does nothing.
1198 :param msg: Message to process.
1207 The data is extracted from output.xml files generated by Jenkins jobs and
1208 stored in pandas' DataFrames.
1214 (as described in ExecutionChecker documentation)
1216 (as described in ExecutionChecker documentation)
1218 (as described in ExecutionChecker documentation)
1221 def __init__(self, spec):
1224 :param spec: Specification.
1225 :type spec: Specification
1232 self._input_data = pd.Series()
1236 """Getter - Input data.
1238 :returns: Input data
1239 :rtype: pandas.Series
1241 return self._input_data
def metadata(self, job, build):
    """Getter - metadata.

    :param job: Job which metadata we want.
    :param build: Build which metadata we want.
    :type job: str
    :type build: str
    :returns: Metadata of the given job/build.
    :rtype: pandas.Series
    """
    return self.data[job][build][u"metadata"]
def suites(self, job, build):
    """Getter - suites.

    :param job: Job which suites we want.
    :param build: Build which suites we want.
    :type job: str
    :type build: str
    :returns: Suites of the given job/build.
    :rtype: pandas.Series
    """
    # build may arrive as an int; data is keyed by the string form.
    return self.data[job][str(build)][u"suites"]
1269 def tests(self, job, build):
1272 :param job: Job which tests we want.
1273 :param build: Build which tests we want.
1277 :rtype: pandas.Series
1280 return self.data[job][build][u"tests"]
1282 def _parse_tests(self, job, build, log):
1283 """Process data from robot output.xml file and return JSON structured
1286 :param job: The name of job which build output data will be processed.
1287 :param build: The build which output data will be processed.
1288 :param log: List of log messages.
1291 :type log: list of tuples (severity, msg)
1292 :returns: JSON data structure.
1301 with open(build[u"file-name"], u'r') as data_file:
1303 result = ExecutionResult(data_file)
1304 except errors.DataError as err:
1306 (u"ERROR", f"Error occurred while parsing output.xml: "
1310 checker = ExecutionChecker(metadata, self._cfg.mapping,
1312 result.visit(checker)
    def _download_and_parse_build(self, job, build, repeat, pid=10000):
        """Download and parse the input data file.

        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
            processed input file.
        :param repeat: Repeat the download specified number of times if not
            successful.
        :param pid: PID of the process executing this method.
        :type job: str
        :type build: dict
        :type repeat: int
        :type pid: int
        :returns: Dict with the parsed data, the processing state and the
            job/build identification (see the final ``return`` below).
        :rtype: dict
        """
        # NOTE(review): incomplete capture -- initialisation of ``logs``,
        # ``state`` and ``data``, the retry loop around the download, the
        # success/damaged branching and several ``logs.append(...)`` wrappers
        # are elided; restore from version control before editing.
        (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
        success = download_and_unzip_data_file(self._cfg, job, build, pid,
        f"It is not possible to download the input data file from the "
        f"job {job}, build {build[u'build']}, or it is damaged. "
        f"  Processing data from the build {build[u'build']} ...")
        # ``_parse_tests`` returns None when the output.xml cannot be parsed.
        data = self._parse_tests(job, build, logs)
        f"Input data file from the job {job}, build "
        f"{build[u'build']} is damaged. Skipped.")
        state = u"processed"
        # Best-effort clean-up of the downloaded file; a failure to remove it
        # is only logged, it does not fail the build processing.
        remove(build[u"file-name"])
        except OSError as err:
            ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
        # If the time-period is defined in the specification file, remove all
        # files which are outside the time period.
        timeperiod = self._cfg.input.get(u"time-period", None)
        if timeperiod and data:
            timeperiod = timedelta(int(timeperiod))
            metadata = data.get(u"metadata", None)
            generated = metadata.get(u"generated", None)
            # Timestamp format must match the one written into "generated".
            generated = dt.strptime(generated, u"%Y%m%d %H:%M")
            # NOTE(review): ``now`` is computed on an elided line above.
            if (now - generated) > timeperiod:
                # Remove the data and the file:
                f"  The build {job}/{build[u'build']} is "
                f"outdated, will be removed.")
        logs.append((u"INFO", u"  Done."))

        # Replay the messages collected during (possibly parallel) processing
        # into the real logger at the severity recorded with each message.
        for level, line in logs:
            if level == u"INFO":
            elif level == u"ERROR":
            elif level == u"DEBUG":
            elif level == u"CRITICAL":
                logging.critical(line)
            elif level == u"WARNING":
                logging.warning(line)

        return {u"data": data, u"state": state, u"job": job, u"build": build}
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
        store in pandas' Series.

        :param repeat: Repeat the download specified number of times if not
            successful.
        :type repeat: int
        """
        logging.info(u"Downloading and parsing input files ...")

        # NOTE(review): incomplete capture -- the guard that skips builds
        # whose processing failed, the closing parentheses of the nested
        # ``pd.Series`` calls and the ``mem_alloc =`` assignment head are
        # elided; restore from version control before editing.
        for job, builds in self._cfg.builds.items():
            for build in builds:

                result = self._download_and_parse_build(job, build, repeat)
                build_nr = result[u"build"][u"build"]

                data = result[u"data"]
                # Convert each top-level section of the parsed data into a
                # pandas Series keyed by the original dict keys.
                build_data = pd.Series({
                    u"metadata": pd.Series(
                        list(data[u"metadata"].values()),
                        index=list(data[u"metadata"].keys())
                    u"suites": pd.Series(
                        list(data[u"suites"].values()),
                        index=list(data[u"suites"].keys())
                    u"tests": pd.Series(
                        list(data[u"tests"].values()),
                        index=list(data[u"tests"].keys())

                if self._input_data.get(job, None) is None:
                    self._input_data[job] = pd.Series()
                # Builds are indexed by their string form, consistently with
                # the getters of this class.
                self._input_data[job][str(build_nr)] = build_data

                self._cfg.set_input_file_name(
                    job, build_nr, result[u"build"][u"file-name"])

                self._cfg.set_input_state(job, build_nr, result[u"state"])

                # NOTE(review): presumably ru_maxrss is in kB here, /1000
                # giving ~MB for the log line below -- verify per platform.
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
                logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

        logging.info(u"Done.")
1464 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1465 """Return the index of character in the string which is the end of tag.
1467 :param tag_filter: The string where the end of tag is being searched.
1468 :param start: The index where the searching is stated.
1469 :param closer: The character which is the tag closer.
1470 :type tag_filter: str
1473 :returns: The index of the tag closer.
1478 idx_opener = tag_filter.index(closer, start)
1479 return tag_filter.index(closer, idx_opener + 1)
1484 def _condition(tag_filter):
1485 """Create a conditional statement from the given tag filter.
1487 :param tag_filter: Filter based on tags from the element specification.
1488 :type tag_filter: str
1489 :returns: Conditional statement which can be evaluated.
1495 index = InputData._end_of_tag(tag_filter, index)
1499 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
    def filter_data(self, element, params=None, data=None, data_set=u"tests",
                    continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:
        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
        ...

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data: If not None, this data is used instead of data specified
            in the element.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data: dict
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """
        # NOTE(review): incomplete capture -- the ``try:`` opener, the
        # branches assigning ``cond`` for the "suites"/"all"/"template"
        # cases, the re-initialisation of ``data`` as pd.Series, the
        # ``data_dict`` assignment and the right-hand sides of the two
        # ``param`` assignments are elided; restore from version control
        # before editing.
        if data_set == "suites":
        elif element[u"filter"] in (u"all", u"template"):
        cond = InputData._condition(element[u"filter"])
        logging.debug(f"   Filter: {cond}")
        logging.error(u"   No filter defined.")

        params = element.get(u"parameters", None)
            # "type" is always needed by downstream consumers.
            params.append(u"type")

        data_to_filter = data if data else element[u"data"]
        for job, builds in data_to_filter.items():
            data[job] = pd.Series()
            for build in builds:
                data[job][str(build)] = pd.Series()
                self.data[job][str(build)][data_set].items())
                if continue_on_error:

                for test_id, test_data in data_dict.items():
                    # SECURITY NOTE: the filter expression comes from the
                    # element specification and is evaluated with eval();
                    # the specification file must be trusted input.
                    if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
                        data[job][str(build)][test_id] = pd.Series()
                        for param, val in test_data.items():
                            data[job][str(build)][test_id][param] = val
                        for param in params:
                            data[job][str(build)][test_id][param] =\
                            data[job][str(build)][test_id][param] =\

        except (KeyError, IndexError, ValueError) as err:
            f"Missing mandatory parameter in the element specification: "
        except AttributeError as err:
            logging.error(repr(err))
        # SyntaxError means the tag filter could not be compiled by eval.
        except SyntaxError as err:
            f"The filter {cond} is not correct. Check if all tags are "
            f"enclosed by apostrophes.\n{repr(err)}"
    def filter_tests_by_name(self, element, params=None, data_set=u"tests",
                             continue_on_error=False):
        """Filter required data from the given jobs and builds.

        The output data structure is:
        - job 1
          - build 1
            - test (or suite) 1 ID:
              - param 1
              - param 2
              ...
              - param n
            ...
            - test (or suite) n ID:
            ...
          ...
        ...

        :param element: Element which will use the filtered data.
        :param params: Parameters which will be included in the output. If None,
            all parameters are included.
        :param data_set: The set of data to be filtered: tests, suites,
            metadata.
        :param continue_on_error: Continue if there is error while reading the
            data. The Item will be empty then
        :type element: pandas.Series
        :type params: list
        :type data_set: str
        :type continue_on_error: bool
        :returns: Filtered data.
        :rtype: pandas.Series
        """
        # NOTE(review): incomplete capture -- the early return for an empty
        # ``include`` list, the initialisation of ``data``, the ``try:``
        # openers and the right-hand sides of several assignments are
        # elided; restore from version control before editing.
        include = element.get(u"include", None)
            logging.warning(u"No tests to include, skipping the element.")

        params = element.get(u"parameters", None)
            # "type" is always needed by downstream consumers.
            params.append(u"type")

        for job, builds in element[u"data"].items():
            data[job] = pd.Series()
            for build in builds:
                data[job][str(build)] = pd.Series()
                for test in include:
                    # Tests are selected by a case-insensitive regular
                    # expression compiled from each ``include`` entry.
                    reg_ex = re.compile(str(test).lower())
                    for test_id in self.data[job][
                            str(build)][data_set].keys():
                        if re.match(reg_ex, str(test_id).lower()):
                            test_data = self.data[job][
                                str(build)][data_set][test_id]
                            data[job][str(build)][test_id] = pd.Series()
                            for param, val in test_data.items():
                                data[job][str(build)][test_id]\
                            for param in params:
                                data[job][str(build)][
                                # Missing parameters are filled with a
                                # placeholder instead of failing.
                                data[job][str(build)][
                                    test_id][param] = u"No Data"
                    except KeyError as err:
                        logging.error(repr(err))
                        if continue_on_error:

        except (KeyError, IndexError, ValueError) as err:
            f"Missing mandatory parameter in the element "
            f"specification: {repr(err)}"
        except AttributeError as err:
            logging.error(repr(err))
1696 def merge_data(data):
1697 """Merge data from more jobs and builds to a simple data structure.
1699 The output data structure is:
1701 - test (suite) 1 ID:
1707 - test (suite) n ID:
1710 :param data: Data to merge.
1711 :type data: pandas.Series
1712 :returns: Merged data.
1713 :rtype: pandas.Series
1716 logging.info(u" Merging data ...")
1718 merged_data = pd.Series()
1719 for builds in data.values:
1720 for item in builds.values:
1721 for item_id, item_data in item.items():
1722 merged_data[item_id] = item_data
    def print_all_oper_data(self):
        """Print all operational ("show runtime") data to console, one
        pretty-printed table per DUT thread.
        """
        # NOTE(review): incomplete capture -- the opening of the ``tbl_hdr``
        # header sequence, the per-node ``row`` construction loop and the
        # ``avg`` accumulation are elided; restore from version control
        # before editing.
            u"Cycles per Packet",
            u"Average Vector Size"

        for job in self._input_data.values:
            for build in job.values:
                for test_id, test_data in build[u"tests"].items():
                    # Only tests which captured "show-run" output are printed.
                    if test_data.get(u"show-run", None) is None:
                    for dut_name, data in test_data[u"show-run"].items():
                        if data.get(u"threads", None) is None:
                        print(f"Host IP: {data.get(u'host', '')}, "
                              f"Socket: {data.get(u'socket', '')}")
                        for thread_nr, thread in data[u"threads"].items():
                            txt_table = prettytable.PrettyTable(tbl_hdr)
                                txt_table.add_row(row)
                            # Guard against division by zero for threads with
                            # no node rows.
                            if len(thread) == 0:
                            avg = f", Average Vector Size per Node: " \
                                f"{(avg / len(thread)):.2f}"
                            # Thread 0 is labelled "main", the rest "worker_N".
                            th_name = u"main" if thread_nr == 0 \
                                else f"worker_{thread_nr}"
                            print(f"{dut_name}, {th_name}{avg}")
                            txt_table.float_format = u".2"
                            txt_table.align = u"r"
                            txt_table.align[u"Name"] = u"l"
                            print(f"{txt_table.get_string()}\n")