1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in pandas' DataFrames,
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
36 from robot.api import ExecutionResult, ResultVisitor
37 from robot import errors
39 from resources.libraries.python import jumpavg
40 from input_data_files import download_and_unzip_data_file
43 # Separator used in file names
# NOTE(review): this listing embeds the original file's line numbers at the
# start of every line; gaps in that numbering mean lines were elided from this
# sampled view. Comments below annotate only what is visible here.
47 class ExecutionChecker(ResultVisitor):
48     """Class to traverse through the test suite structure.
50     The functionality implemented in this class generates a json structure:
56             "generated": "Timestamp",
57             "version": "SUT version",
58             "job": "Jenkins job name",
59             "build": "Information about the build"
62             "Suite long name 1": {
64                 "doc": "Suite 1 documentation",
65                 "parent": "Suite 1 parent",
66                 "level": "Level of the suite in the suite hierarchy"
68             "Suite long name N": {
70                 "doc": "Suite N documentation",
71                 "parent": "Suite 2 parent",
72                 "level": "Level of the suite in the suite hierarchy"
79                 "parent": "Name of the parent of the test",
80                 "doc": "Test documentation",
81                 "msg": "Test message",
82                 "conf-history": "DUT1 and DUT2 VAT History",
83                 "show-run": "Show Run",
84                 "tags": ["tag 1", "tag 2", "tag n"],
86                 "status": "PASS" | "FAIL",
132                 "parent": "Name of the parent of the test",
133                 "doc": "Test documentation",
134                 "msg": "Test message",
135                 "tags": ["tag 1", "tag 2", "tag n"],
137                 "status": "PASS" | "FAIL",
144                 "parent": "Name of the parent of the test",
145                 "doc": "Test documentation",
146                 "msg": "Test message",
147                 "tags": ["tag 1", "tag 2", "tag n"],
148                 "type": "MRR" | "BMRR",
149                 "status": "PASS" | "FAIL",
151                     "receive-rate": float,
152                     # Average of a list, computed using AvgStdevStats.
153                     # In CSIT-1180, replace with List[float].
167         "metadata": {  # Optional
168             "version": "VPP version",
169             "job": "Jenkins job name",
170             "build": "Information about the build"
174             "doc": "Suite 1 documentation",
175             "parent": "Suite 1 parent",
176             "level": "Level of the suite in the suite hierarchy"
179             "doc": "Suite N documentation",
180             "parent": "Suite 2 parent",
181             "level": "Level of the suite in the suite hierarchy"
187             "parent": "Name of the parent of the test",
188             "doc": "Test documentation"
189             "msg": "Test message"
190             "tags": ["tag 1", "tag 2", "tag n"],
191             "conf-history": "DUT1 and DUT2 VAT History"
192             "show-run": "Show Run"
193             "status": "PASS" | "FAIL"
201     .. note:: ID is the lowercase full path to the test.
# --- Regexes parsing throughput / latency figures out of Robot test messages ---
# Lower/upper bounds reported by PLRsearch (soak tests).
204     REGEX_PLR_RATE = re.compile(
205         r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
206         r'PLRsearch upper bound::?\s(\d+.\d+)'
# NDR/PDR lower and upper rates — four capture groups consumed by
# _get_ndrpdr_throughput.
208     REGEX_NDRPDR_RATE = re.compile(
209         r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
210         r'NDR_UPPER:\s(\d+.\d+).*\n'
211         r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
212         r'PDR_UPPER:\s(\d+.\d+)'
# Full NDRPDR message — 20 groups consumed by _get_data_from_perf_test_msg.
214     REGEX_PERF_MSG_INFO = re.compile(
215         r'NDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
216         r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
217         r'NDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
218         r'PDR_LOWER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*).*\n'
219         r'LATENCY.*\[\'(.*)\', \'(.*)\'\].*\n'
220         r'PDR_UPPER:\s(\d+.\d+)\s([a-zA-Z]*).*\s(\d+.\d+)\s([a-zA-Z]*)'
222     # TODO: Remove when not needed
# Three latency-message formats (base / current / long) — all tried in
# sequence by _get_ndrpdr_latency.
223     REGEX_NDRPDR_LAT_BASE = re.compile(
224         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
225         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
227     REGEX_NDRPDR_LAT = re.compile(
228         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
229         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
230         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
231         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
232         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
233         r'Latency.*\[\'(.*)\', \'(.*)\'\]'
235     # TODO: Remove when not needed
236     REGEX_NDRPDR_LAT_LONG = re.compile(
237         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
238         r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
239         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
240         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
241         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
242         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
243         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244         r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245         r'Latency.*\[\'(.*)\', \'(.*)\'\]'
# --- SUT version extraction (VPP / DPDK); group(2) carries the version. ---
247     REGEX_VERSION_VPP = re.compile(
248         r"(return STDOUT Version:\s*|"
249         r"VPP Version:\s*|VPP version:\s*)(.*)"
251     REGEX_VERSION_DPDK = re.compile(
252         r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
254     REGEX_TCP = re.compile(
255         r'Total\s(rps|cps|throughput):\s(\d*).*$'
257     REGEX_MRR = re.compile(
258         r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
259         r'tx\s(\d*),\srx\s(\d*)'
261     REGEX_BMRR = re.compile(
262         r'Maximum Receive Rate trial results'
263         r' in packets per second: \[(.*)\]'
265     REGEX_RECONF_LOSS = re.compile(
266         r'Packets lost due to reconfig: (\d*)'
268     REGEX_RECONF_TIME = re.compile(
269         r'Implied time lost: (\d*.[\de-]*)'
# --- Test-case naming / tagging helpers (threads-and-cores tag rewrites). ---
271     REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
273     REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
275     REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
277     REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
279     REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
# Initializes all parser state and the message-type -> handler dispatch table.
281     def __init__(self, metadata, mapping, ignore):
284         :param metadata: Key-value pairs to be included in "metadata" part of
286         :param mapping: Mapping of the old names of test cases to the new
288         :param ignore: List of TCs to be ignored.
294         # Type of message to parse out from the test messages
295         self._msg_type = None
301         self._timestamp = None
303         # Testbed. The testbed is identified by TG node IP address.
306         # Mapping of TCs long names
307         self._mapping = mapping
310         self._ignore = ignore
312         # Number of PAPI History messages found:
314         # 1 - PAPI History of DUT1
315         # 2 - PAPI History of DUT2
316         self._conf_history_lookup_nr = 0
318         # Test ID of currently processed test- the lowercase full path to the
322         # The main data structure
324             u"metadata": OrderedDict(),
325             u"suites": OrderedDict(),
326             u"tests": OrderedDict()
329         # Save the provided metadata
330         for key, val in metadata.items():
331             self._data[u"metadata"][key] = val
333         # Dictionary defining the methods used to parse different types of
# Keys match the values assigned to self._msg_type by the start_*_kw hooks.
336             u"timestamp": self._get_timestamp,
337             u"vpp-version": self._get_vpp_version,
338             u"dpdk-version": self._get_dpdk_version,
339             # TODO: Remove when not needed:
340             u"teardown-vat-history": self._get_vat_history,
341             u"teardown-papi-history": self._get_papi_history,
342             u"test-show-runtime": self._get_show_run,
343             u"testbed": self._get_testbed
348 """Getter - Data parsed from the XML file.
350 :returns: Data parsed from the XML file.
# Reformats the raw NDRPDR result message into a short human-readable summary
# (NDR/PDR throughput in M-units plus one-way latencies). Falls back to the
# original message on any parsing problem (see the except clauses).
355     def _get_data_from_perf_test_msg(self, msg):
363         from message of NDRPDR performance tests.
365         :param msg: Message to be processed.
367         :returns: Processed message or original message if a problem occurs.
# REGEX_PERF_MSG_INFO must match with exactly 20 groups, else give up.
371         groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
372         if not groups or groups.lastindex != 20:
377                 u"ndr_low": float(groups.group(1)),
378                 u"ndr_low_unit": groups.group(2),
379                 u"ndr_low_b": float(groups.group(3)),
380                 u"ndr_low_b_unit": groups.group(4),
381                 u"ndr_lat_1": groups.group(5),
382                 u"ndr_lat_2": groups.group(6),
383                 u"ndr_up": float(groups.group(7)),
384                 u"ndr_up_unit": groups.group(8),
385                 u"ndr_up_b": float(groups.group(9)),
386                 u"ndr_up_b_unit": groups.group(10),
387                 u"pdr_low": float(groups.group(11)),
388                 u"pdr_low_unit": groups.group(12),
389                 u"pdr_low_b": float(groups.group(13)),
390                 u"pdr_low_b_unit": groups.group(14),
391                 u"pdr_lat_1": groups.group(15),
392                 u"pdr_lat_2": groups.group(16),
393                 u"pdr_up": float(groups.group(17)),
394                 u"pdr_up_unit": groups.group(18),
395                 u"pdr_up_b": float(groups.group(19)),
396                 u"pdr_up_b_unit": groups.group(20)
398         except (AttributeError, IndexError, ValueError, KeyError):
# Local helper: formats two "min/avg/max[/hdrh]" latency strings into one line.
401         def _process_lat(in_str_1, in_str_2):
402             """Extract min, avg, max values from latency string.
404             :param in_str_1: Latency string for one direction produced by robot
406             :param in_str_2: Latency string for second direction produced by
410             :returns: Processed latency string or original string if a problem
# Split limited to 4 pieces: min/avg/max plus optional hdrh blob (ignored here).
414             in_list_1 = in_str_1.split('/', 3)
415             if len(in_list_1) < 3:
416                 return u"Min/Avg/Max, -1/-1/-1, -1/-1/-1 uSec."
418             in_list_2 = in_str_2.split('/', 3)
419             if len(in_list_2) < 3:
420                 return u"Min/Avg/Max, -1/-1/-1, -1/-1/-1 uSec."
422             return f"Min/Avg/Max, " \
423                 f"{in_list_1[0]}/{in_list_1[1]}/{in_list_1[2]}, " \
424                 f"{in_list_2[0]}/{in_list_2[1]}/{in_list_2[2]} uSec."
# Division by 1e6 converts the reported rate to mega-units (hence the "M"
# prefix glued onto the unit in the next line).
428                 f"NDR Throughput: {(data[u'ndr_low'] / 1e6):.2f} "
429                 f"M{data[u'ndr_low_unit']}, "
430                 f"{data[u'ndr_low_b']:.2f} {data[u'ndr_low_b_unit']}.\n"
431                 f"One-Way Latency at NDR: "
432                 f"{_process_lat(data[u'ndr_lat_1'], data[u'ndr_lat_2'])}\n"
433                 f"PDR Throughput: {(data[u'pdr_low'] / 1e6):.2f} "
434                 f"M{data[u'pdr_low_unit']}, "
435                 f"{data[u'pdr_low_b']:.2f} {data[u'pdr_low_b_unit']}.\n"
436                 f"One-Way Latency at PDR: "
437                 f"{_process_lat(data[u'pdr_lat_1'], data[u'pdr_lat_2'])}"
439         except (AttributeError, IndexError, ValueError, KeyError):
# Extracts the TG node IP from a "Setup of TG node" message and stores it as
# metadata["testbed"]; resets _msg_type so later messages are not re-parsed.
442     def _get_testbed(self, msg):
443         """Called when extraction of testbed IP is required.
444         The testbed is identified by TG node IP address.
446         :param msg: Message to process.
451         if msg.message.count(u"Setup of TG node") or \
452                 msg.message.count(u"Setup of node TG host"):
453             reg_tg_ip = re.compile(
454                 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
456                 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
457             except (KeyError, ValueError, IndexError, AttributeError):
460             self._data[u"metadata"][u"testbed"] = self._testbed
461             self._msg_type = None
# Extracts the VPP version string (REGEX_VERSION_VPP group 2) into
# metadata["version"]; clears _msg_type once found.
463     def _get_vpp_version(self, msg):
464         """Called when extraction of VPP version is required.
466         :param msg: Message to process.
471         if msg.message.count(u"return STDOUT Version:") or \
472             msg.message.count(u"VPP Version:") or \
473             msg.message.count(u"VPP version:"):
474             self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
476             self._data[u"metadata"][u"version"] = self._version
477             self._msg_type = None
# Extracts the DPDK version (REGEX_VERSION_DPDK group 2) into
# metadata["version"]; clears _msg_type once found.
479     def _get_dpdk_version(self, msg):
480         """Called when extraction of DPDK version is required.
482         :param msg: Message to process.
487         if msg.message.count(u"DPDK Version:"):
489                 self._version = str(re.search(
490                     self.REGEX_VERSION_DPDK, msg.message).group(2))
491                 self._data[u"metadata"][u"version"] = self._version
495                 self._msg_type = None
# Stores the first 14 characters of the message timestamp as
# metadata["generated"]; clears _msg_type afterwards.
497     def _get_timestamp(self, msg):
498         """Called when extraction of timestamp is required.
500         :param msg: Message to process.
505         self._timestamp = msg.timestamp[:14]
506         self._data[u"metadata"][u"generated"] = self._timestamp
507         self._msg_type = None
# Appends per-DUT VAT command history to the current test's "conf-history",
# using _conf_history_lookup_nr to label DUT1/DUT2 and to initialise the field
# on the first hit. Legacy path (see TODO) — mirrors _get_papi_history.
509     def _get_vat_history(self, msg):
510         """Called when extraction of VAT command history is required.
512         TODO: Remove when not needed.
514         :param msg: Message to process.
518         if msg.message.count(u"VAT command history:"):
519             self._conf_history_lookup_nr += 1
520             if self._conf_history_lookup_nr == 1:
521                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
523                 self._msg_type = None
# Strip the leading "<ip> VAT command history:" prefix and convert newlines
# to the " |br| " markup used downstream.
524             text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
525                           r"VAT command history:", u"",
526                           msg.message, count=1).replace(u'\n', u' |br| ').\
529             self._data[u"tests"][self._test_id][u"conf-history"] += (
530                 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
# PAPI counterpart of _get_vat_history: appends per-DUT PAPI command history
# to the current test's "conf-history" field.
533     def _get_papi_history(self, msg):
534         """Called when extraction of PAPI command history is required.
536         :param msg: Message to process.
540         if msg.message.count(u"PAPI command history:"):
541             self._conf_history_lookup_nr += 1
542             if self._conf_history_lookup_nr == 1:
543                 self._data[u"tests"][self._test_id][u"conf-history"] = str()
545                 self._msg_type = None
# Strip the "<ip> PAPI command history:" prefix; newlines become " |br| ".
546             text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
547                           r"PAPI command history:", u"",
548                           msg.message, count=1).replace(u'\n', u' |br| ').\
550             self._data[u"tests"][self._test_id][u"conf-history"] += (
551                 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
# Parses a "stats runtime" PAPI/CLI message into per-DUT, per-thread node
# statistics stored under tests[test_id]["show-run"][DUTn]["threads"].
554     def _get_show_run(self, msg):
555         """Called when extraction of VPP operational data (output of CLI command
556         Show Runtime) is required.
558         :param msg: Message to process.
563         if not msg.message.count(u"stats runtime"):
566         if u"show-run" not in self._data[u"tests"][self._test_id].keys():
567             self._data[u"tests"][self._test_id][u"show-run"] = dict()
# Host IP and socket path are pulled from the "(ip - sock)" suffix; failures
# are tolerated (groups may be absent).
569         groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
573             host = groups.group(1)
574         except (AttributeError, IndexError):
577             sock = groups.group(2)
578         except (AttributeError, IndexError):
# Normalise the python-repr-ish payload into valid JSON, then parse the part
# after the first ":".
581         runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
582                         replace(u"'", u'"').replace(u'b"', u'"').
583                         replace(u'u"', u'"').split(u":", 1)[1])
586             threads_nr = len(runtime[0][u"clocks"])
587         except (IndexError, KeyError):
# DUT label is derived from how many DUTs are already recorded for this test.
590         dut = u"DUT{nr}".format(
591             nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
596             u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
600             for idx in range(threads_nr):
# Clocks are normalised per vector, falling back to per call, then per
# suspend, when the preferred denominator is zero.
601                 if item[u"vectors"][idx] > 0:
602                     clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
603                 elif item[u"calls"][idx] > 0:
604                     clocks = item[u"clocks"][idx] / item[u"calls"][idx]
605                 elif item[u"suspends"][idx] > 0:
606                     clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
610                 if item[u"calls"][idx] > 0:
611                     vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
# Only record nodes that did any work on this thread.
615                 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
616                         int(item[u"suspends"][idx]):
617                     oper[u"threads"][idx].append([
620                         item[u"vectors"][idx],
621                         item[u"suspends"][idx],
626         self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
# Parses NDR/PDR lower and upper rates out of the test message; -1.0 defaults
# signal "not found", and the returned status reflects parse success.
628     def _get_ndrpdr_throughput(self, msg):
629         """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
632         :param msg: The test message to be parsed.
634         :returns: Parsed data as a dict and the status (PASS/FAIL).
635         :rtype: tuple(dict, str)
639             u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
640             u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
643         groups = re.search(self.REGEX_NDRPDR_RATE, msg)
645         if groups is not None:
647                 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
648                 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
649                 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
650                 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
652             except (IndexError, ValueError):
655         return throughput, status
# Parses PLRsearch lower/upper bounds (soak tests); status reflects whether
# both floats were extracted.
657     def _get_plr_throughput(self, msg):
658         """Get PLRsearch lower bound and PLRsearch upper bound from the test
661         :param msg: The test message to be parsed.
663         :returns: Parsed data as a dict and the status (PASS/FAIL).
664         :rtype: tuple(dict, str)
672         groups = re.search(self.REGEX_PLR_RATE, msg)
674         if groups is not None:
676                 throughput[u"LOWER"] = float(groups.group(1))
677                 throughput[u"UPPER"] = float(groups.group(2))
679             except (IndexError, ValueError):
682         return throughput, status
# Parses latency results in three historical message formats, tried from the
# newest/longest (REGEX_NDRPDR_LAT_LONG) to the oldest (REGEX_NDRPDR_LAT_BASE).
# Which keys get filled depends on how many groups the matching format yields
# (4, 12 or 18) — see the lastindex checks below.
684     def _get_ndrpdr_latency(self, msg):
685         """Get LATENCY from the test message.
687         :param msg: The test message to be parsed.
689         :returns: Parsed data as a dict and the status (PASS/FAIL).
690         :rtype: tuple(dict, str)
# Pre-populate every bucket with copies of the default so partial matches
# still return a fully-shaped dict.
700             u"direction1": copy.copy(latency_default),
701             u"direction2": copy.copy(latency_default)
704             u"direction1": copy.copy(latency_default),
705             u"direction2": copy.copy(latency_default)
708             u"direction1": copy.copy(latency_default),
709             u"direction2": copy.copy(latency_default)
712             u"direction1": copy.copy(latency_default),
713             u"direction2": copy.copy(latency_default)
716             u"direction1": copy.copy(latency_default),
717             u"direction2": copy.copy(latency_default)
720             u"direction1": copy.copy(latency_default),
721             u"direction2": copy.copy(latency_default)
725         # TODO: Rewrite when long and base are not needed
726         groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
728             groups = re.search(self.REGEX_NDRPDR_LAT, msg)
730             groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
732             return latency, u"FAIL"
# Local helper turning "min/avg/max[/hdrh]" into a dict of floats (+ raw hdrh).
734         def process_latency(in_str):
735             """Return object with parsed latency values.
737             TODO: Define class for the return type.
739             :param in_str: Input string, min/avg/max/hdrh format.
741             :returns: Dict with corresponding keys, except hdrh float values.
743             :throws IndexError: If in_str does not have enough substrings.
744             :throws ValueError: If a substring does not convert to float.
746             in_list = in_str.split('/', 3)
749                 u"min": float(in_list[0]),
750                 u"avg": float(in_list[1]),
751                 u"max": float(in_list[2]),
# The optional 4th component is the hdrh histogram blob, kept as a string.
755             if len(in_list) == 4:
756                 rval[u"hdrh"] = str(in_list[3])
# Groups 1-4 (all formats): NDR and PDR, both directions.
761             latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
762             latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
763             latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
764             latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
765             if groups.lastindex == 4:
766                 return latency, u"PASS"
767         except (IndexError, ValueError):
# Groups 5-12 (current format): PDR90/PDR50/PDR10 and zero-load (LAT0).
771             latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
772             latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
773             latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
774             latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
775             latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
776             latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
777             latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
778             latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
779             if groups.lastindex == 12:
780                 return latency, u"PASS"
781         except (IndexError, ValueError):
784         # TODO: Remove when not needed
# Long (legacy) format additionally reports NDR10/NDR50/NDR90 buckets.
785         latency[u"NDR10"] = {
786             u"direction1": copy.copy(latency_default),
787             u"direction2": copy.copy(latency_default)
789         latency[u"NDR50"] = {
790             u"direction1": copy.copy(latency_default),
791             u"direction2": copy.copy(latency_default)
793         latency[u"NDR90"] = {
794             u"direction1": copy.copy(latency_default),
795             u"direction2": copy.copy(latency_default)
# NOTE(review): in the long format, groups 5-6 are LAT0 and 7-18 are
# NDR10..PDR90 — a different layout than the 12-group branch above.
798             latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
799             latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
800             latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
801             latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
802             latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
803             latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
804             latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
805             latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
806             latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
807             latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
808             latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
809             latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
810             latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
811             latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
812             return latency, u"PASS"
813         except (IndexError, ValueError):
816         return latency, u"FAIL"
# Robot ResultVisitor hook: descends into child suites and tests unless
# start_suite vetoes by returning False.
818     def visit_suite(self, suite):
819         """Implements traversing through the suite and its direct children.
821         :param suite: Suite to process.
825         if self.start_suite(suite) is not False:
826             suite.suites.visit(self)
827             suite.tests.visit(self)
828             self.end_suite(suite)
# Records the suite (name, doc, parent, hierarchy level) into
# self._data["suites"], keyed by the sanitised lowercase longname, then
# visits the suite's keywords.
830     def start_suite(self, suite):
831         """Called when suite starts.
833         :param suite: Suite to process.
# Root suites have no parent; tolerate the missing attribute.
839             parent_name = suite.parent.name
840         except AttributeError:
# Normalise the doc string into the " |br| " markup used by the report.
843         doc_str = suite.doc.\
844             replace(u'"', u"'").\
845             replace(u'\n', u' ').\
846             replace(u'\r', u'').\
847             replace(u'*[', u' |br| *[').\
848             replace(u"*", u"**").\
849             replace(u' |br| *[', u'*[', 1)
851         self._data[u"suites"][suite.longname.lower().
853                               replace(u" ", u"_")] = {
854                                   u"name": suite.name.lower(),
856                                   u"parent": parent_name,
857                                   u"level": len(suite.longname.split(u"."))
860         suite.keywords.visit(self)
# Visitor hook; nothing to clean up at suite end.
862     def end_suite(self, suite):
863         """Called when suite ends.
865         :param suite: Suite to process.
# Visitor hook: processes the test's keywords unless start_test vetoes.
870     def visit_test(self, test):
871         """Implements traversing through the test.
873         :param test: Test to process.
877         if self.start_test(test) is not False:
878             test.keywords.visit(self)
# Core per-test extraction: applies ignore/mapping lists, normalises the test
# id and name, then dispatches on tags (NDRPDR / SOAK / TCP / MRR-BMRR /
# RECONF) to fill test_result, which is stored into self._data["tests"].
881     def start_test(self, test):
882         """Called when test starts.
884         :param test: Test to process.
889         longname_orig = test.longname.lower()
891         # Check the ignore list
892         if longname_orig in self._ignore:
895         tags = [str(tag) for tag in test.tags]
898         # Change the TC long name and name if defined in the mapping table
899         longname = self._mapping.get(longname_orig, None)
900         if longname is not None:
901             name = longname.split(u'.')[-1]
903                 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
907             longname = longname_orig
908             name = test.name.lower()
910         # Remove TC number from the TC long name (backward compatibility):
911         self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
912         # Remove TC number from the TC name (not needed):
913         test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
915         test_result[u"parent"] = test.parent.name.lower()
916         test_result[u"tags"] = tags
917         test_result["doc"] = test.doc.\
918             replace(u'"', u"'").\
919             replace(u'\n', u' ').\
920             replace(u'\r', u'').\
921             replace(u'[', u' |br| [').\
922             replace(u' |br| [', u'[', 1)
923         test_result[u"msg"] = self._get_data_from_perf_test_msg(test.message).\
924             replace(u'\n', u' |br| ').\
925             replace(u'\r', u'').\
# Defaults; refined below once tags are examined.
927         test_result[u"type"] = u"FUNC"
928         test_result[u"status"] = test.status
930         if u"PERFTEST" in tags:
931             # Replace info about cores (e.g. -1c-) with the info about threads
932             # and cores (e.g. -1t1c-) in the long test case names and in the
933             # test case names if necessary.
934             groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
938                 for tag in test_result[u"tags"]:
939                     groups = re.search(self.REGEX_TC_TAG, tag)
945                     self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
946                                            f"-{tag_tc.lower()}-",
949                     test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
950                                                   f"-{tag_tc.lower()}-",
# A test without exactly one threads/cores tag cannot be renamed — mark FAIL.
954                     test_result[u"status"] = u"FAIL"
955                     self._data[u"tests"][self._test_id] = test_result
957                         f"The test {self._test_id} has no or more than one "
958                         f"multi-threading tags.\n"
959                         f"Tags: {test_result[u'tags']}"
# Result parsing only for passed tests; the parsers may downgrade status.
963         if test.status == u"PASS":
964             if u"NDRPDR" in tags:
965                 test_result[u"type"] = u"NDRPDR"
966                 test_result[u"throughput"], test_result[u"status"] = \
967                     self._get_ndrpdr_throughput(test.message)
968                 test_result[u"latency"], test_result[u"status"] = \
969                     self._get_ndrpdr_latency(test.message)
970             elif u"SOAK" in tags:
971                 test_result[u"type"] = u"SOAK"
972                 test_result[u"throughput"], test_result[u"status"] = \
973                     self._get_plr_throughput(test.message)
975                 test_result[u"type"] = u"TCP"
976                 groups = re.search(self.REGEX_TCP, test.message)
977                 test_result[u"result"] = int(groups.group(2))
978             elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
980                     test_result[u"type"] = u"MRR"
982                     test_result[u"type"] = u"BMRR"
984                 test_result[u"result"] = dict()
985                 groups = re.search(self.REGEX_BMRR, test.message)
986                 if groups is not None:
987                     items_str = groups.group(1)
988                     items_float = [float(item.strip()) for item
989                                    in items_str.split(",")]
990                     # Use whole list in CSIT-1180.
991                     stats = jumpavg.AvgStdevStats.for_runs(items_float)
992                     test_result[u"result"][u"receive-rate"] = stats.avg
# Fallback: legacy MRR message — rate = rx packets / trial duration.
994                     groups = re.search(self.REGEX_MRR, test.message)
995                     test_result[u"result"][u"receive-rate"] = \
996                         float(groups.group(3)) / float(groups.group(1))
997             elif u"RECONF" in tags:
998                 test_result[u"type"] = u"RECONF"
999                 test_result[u"result"] = None
1001                     grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1002                     grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1003                     test_result[u"result"] = {
1004                         u"loss": int(grps_loss.group(1)),
1005                         u"time": float(grps_time.group(1))
1007                 except (AttributeError, IndexError, ValueError, TypeError):
1008                     test_result[u"status"] = u"FAIL"
1010                 test_result[u"status"] = u"FAIL"
1011                 self._data[u"tests"][self._test_id] = test_result
1014         self._data[u"tests"][self._test_id] = test_result
# Visitor hook; nothing to clean up at test end.
1016     def end_test(self, test):
1017         """Called when test ends.
1019         :param test: Test to process.
# Visitor hook: dispatches to start_keyword/end_keyword.
1024     def visit_keyword(self, keyword):
1025         """Implements traversing through the keyword and its child keywords.
1027         :param keyword: Keyword to process.
1028         :type keyword: Keyword
1031         if self.start_keyword(keyword) is not False:
1032             self.end_keyword(keyword)
# Routes keywords to the setup/teardown/test traversal by keyword.type;
# keywords without a usable type attribute are silently skipped.
1034     def start_keyword(self, keyword):
1035         """Called when keyword starts. Default implementation does nothing.
1037         :param keyword: Keyword to process.
1038         :type keyword: Keyword
1042             if keyword.type == u"setup":
1043                 self.visit_setup_kw(keyword)
1044             elif keyword.type == u"teardown":
1045                 self.visit_teardown_kw(keyword)
1047                 self.visit_test_kw(keyword)
1048         except AttributeError:
# Visitor hook; intentionally empty.
1051     def end_keyword(self, keyword):
1052         """Called when keyword ends. Default implementation does nothing.
1054         :param keyword: Keyword to process.
1055         :type keyword: Keyword
# Recursively walks test-keyword children, honouring start_test_kw's veto.
1059     def visit_test_kw(self, test_kw):
1060         """Implements traversing through the test keyword and its child
1063         :param test_kw: Keyword to process.
1064         :type test_kw: Keyword
1067         for keyword in test_kw.keywords:
1068             if self.start_test_kw(keyword) is not False:
1069                 self.visit_test_kw(keyword)
1070                 self.end_test_kw(keyword)
# Selects the message parser (_msg_type) for show-runtime and DPDK-version
# keywords, then visits the keyword's messages.
1072     def start_test_kw(self, test_kw):
1073         """Called when test keyword starts. Default implementation does
1076         :param test_kw: Keyword to process.
1077         :type test_kw: Keyword
1080         if test_kw.name.count(u"Show Runtime On All Duts") or \
1081                 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1082             self._msg_type = u"test-show-runtime"
1083         elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
1084             self._msg_type = u"dpdk-version"
1087         test_kw.messages.visit(self)
# Visitor hook; intentionally empty.
1089     def end_test_kw(self, test_kw):
1090         """Called when keyword ends. Default implementation does nothing.
1092         :param test_kw: Keyword to process.
1093         :type test_kw: Keyword
# Recursively walks setup-keyword children, honouring start_setup_kw's veto.
1097     def visit_setup_kw(self, setup_kw):
1098         """Implements traversing through the teardown keyword and its child
1101         :param setup_kw: Keyword to process.
1102         :type setup_kw: Keyword
1105         for keyword in setup_kw.keywords:
1106             if self.start_setup_kw(keyword) is not False:
1107                 self.visit_setup_kw(keyword)
1108                 self.end_setup_kw(keyword)
# Selects the message parser for VPP-version / timestamp / testbed keywords
# (each only until the corresponding value is known), then visits messages.
1110     def start_setup_kw(self, setup_kw):
1111         """Called when teardown keyword starts. Default implementation does
1114         :param setup_kw: Keyword to process.
1115         :type setup_kw: Keyword
1118         if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1119                 and not self._version:
1120             self._msg_type = u"vpp-version"
1121         elif setup_kw.name.count(u"Set Global Variable") \
1122                 and not self._timestamp:
1123             self._msg_type = u"timestamp"
1124         elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1125             self._msg_type = u"testbed"
1128         setup_kw.messages.visit(self)
# Visitor hook; intentionally empty.
1130     def end_setup_kw(self, setup_kw):
1131         """Called when keyword ends. Default implementation does nothing.
1133         :param setup_kw: Keyword to process.
1134         :type setup_kw: Keyword
# Recursively walks teardown-keyword children, honouring start_teardown_kw.
1138     def visit_teardown_kw(self, teardown_kw):
1139         """Implements traversing through the teardown keyword and its child
1142         :param teardown_kw: Keyword to process.
1143         :type teardown_kw: Keyword
1146         for keyword in teardown_kw.keywords:
1147             if self.start_teardown_kw(keyword) is not False:
1148                 self.visit_teardown_kw(keyword)
1149                 self.end_teardown_kw(keyword)
# Selects the VAT or PAPI history parser, resetting the per-DUT counter,
# and immediately visits the keyword's messages.
1151     def start_teardown_kw(self, teardown_kw):
1152         """Called when teardown keyword starts
1154         :param teardown_kw: Keyword to process.
1155         :type teardown_kw: Keyword
1159         if teardown_kw.name.count(u"Show Vat History On All Duts"):
1160             # TODO: Remove when not needed:
1161             self._conf_history_lookup_nr = 0
1162             self._msg_type = u"teardown-vat-history"
1163             teardown_kw.messages.visit(self)
1164         elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1165             self._conf_history_lookup_nr = 0
1166             self._msg_type = u"teardown-papi-history"
1167             teardown_kw.messages.visit(self)
# Visitor hook; intentionally empty.
1169     def end_teardown_kw(self, teardown_kw):
1170         """Called when keyword ends. Default implementation does nothing.
1172         :param teardown_kw: Keyword to process.
1173         :type teardown_kw: Keyword
# Visitor hook: dispatches to start_message/end_message.
1177     def visit_message(self, msg):
1178         """Implements visiting the message.
1180         :param msg: Message to process.
1184         if self.start_message(msg) is not False:
1185             self.end_message(msg)
# Dispatches the message to the handler registered for the current
# self._msg_type in the parse_msg table (built in __init__).
1187     def start_message(self, msg):
1188         """Called when message starts. Get required information from messages:
1191         :param msg: Message to process.
1197             self.parse_msg[self._msg_type](msg)
# Visitor hook; intentionally empty.
1199     def end_message(self, msg):
1200         """Called when message ends. Default implementation does nothing.
1202         :param msg: Message to process.
1211 The data is extracted from output.xml files generated by Jenkins jobs and
1212 stored in pandas' DataFrames.
1218 (as described in ExecutionChecker documentation)
1220 (as described in ExecutionChecker documentation)
1222 (as described in ExecutionChecker documentation)
# Stores the specification and creates the empty job->build->data Series.
1225     def __init__(self, spec):
1228         :param spec: Specification.
1229         :type spec: Specification
1236         self._input_data = pd.Series()
1240 """Getter - Input data.
1242 :returns: Input data
1243 :rtype: pandas.Series
1245 return self._input_data
# Accessor for the "metadata" slice of one job/build's parsed data.
1247     def metadata(self, job, build):
1248         """Getter - metadata
1250         :param job: Job which metadata we want.
1251         :param build: Build which metadata we want.
1255         :rtype: pandas.Series
1258         return self.data[job][build][u"metadata"]
# Accessor for the "suites" slice; note the build key is stringified here,
# matching how download_and_parse_data stores builds.
1260     def suites(self, job, build):
1263         :param job: Job which suites we want.
1264         :param build: Build which suites we want.
1268         :rtype: pandas.Series
1271         return self.data[job][str(build)][u"suites"]
# Accessor for the "tests" slice of one job/build's parsed data.
1273     def tests(self, job, build):
1276         :param job: Job which tests we want.
1277         :param build: Build which tests we want.
1281         :rtype: pandas.Series
1284         return self.data[job][build][u"tests"]
# Parses one downloaded output.xml with Robot's ExecutionResult and runs the
# ExecutionChecker visitor over it; parse errors are logged and tolerated.
1286     def _parse_tests(self, job, build, log):
1287         """Process data from robot output.xml file and return JSON structured
1290         :param job: The name of job which build output data will be processed.
1291         :param build: The build which output data will be processed.
1292         :param log: List of log messages.
1295         :type log: list of tuples (severity, msg)
1296         :returns: JSON data structure.
1305         with open(build[u"file-name"], u'r') as data_file:
1307                 result = ExecutionResult(data_file)
1308             except errors.DataError as err:
1310                     (u"ERROR", f"Error occurred while parsing output.xml: "
1314         checker = ExecutionChecker(metadata, self._cfg.mapping,
1316         result.visit(checker)
# Downloads one build's output.xml (with retries), parses it, removes the
# local file, prunes data older than the configured time-period, flushes the
# accumulated log tuples through the logging module, and returns a result
# dict {"data", "state", "job", "build"}.
1320     def _download_and_parse_build(self, job, build, repeat, pid=10000):
1321         """Download and parse the input data file.
1323         :param pid: PID of the process executing this method.
1324         :param job: Name of the Jenkins job which generated the processed input
1326         :param build: Information about the Jenkins build which generated the
1327             processed input file.
1328         :param repeat: Repeat the download specified number of times if not
1339             (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
1347         success = download_and_unzip_data_file(self._cfg, job, build, pid,
1355                 f"It is not possible to download the input data file from the "
1356                 f"job {job}, build {build[u'build']}, or it is damaged. "
1362                  f"    Processing data from the build {build[u'build']} ...")
1364             data = self._parse_tests(job, build, logs)
1368                      f"Input data file from the job {job}, build "
1369                      f"{build[u'build']} is damaged. Skipped.")
1372                 state = u"processed"
# Best-effort cleanup of the downloaded file.
1375                 remove(build[u"file-name"])
1376             except OSError as err:
1378                 ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1382         # If the time-period is defined in the specification file, remove all
1383         # files which are outside the time period.
1384         timeperiod = self._cfg.input.get(u"time-period", None)
1385         if timeperiod and data:
1387             timeperiod = timedelta(int(timeperiod))
1388             metadata = data.get(u"metadata", None)
1390                 generated = metadata.get(u"generated", None)
1392                     generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1393                     if (now - generated) > timeperiod:
1394                         # Remove the data and the file:
1399                             f"    The build {job}/{build[u'build']} is "
1400                             f"outdated, will be removed.")
1402         logs.append((u"INFO", u"  Done."))
# Replay buffered log tuples through the real logger at the right severity.
1404         for level, line in logs:
1405             if level == u"INFO":
1407             elif level == u"ERROR":
1409             elif level == u"DEBUG":
1411             elif level == u"CRITICAL":
1412                 logging.critical(line)
1413             elif level == u"WARNING":
1414                 logging.warning(line)
1416         return {u"data": data, u"state": state, u"job": job, u"build": build}
# NOTE(review): non-contiguous extract (embedded line numbers jump); e.g. the
# closing brackets of the pd.Series literal and some branches are not visible.
# Code lines kept byte-identical; comments only.
1418 def download_and_parse_data(self, repeat=1):
1419 """Download the input data files, parse input data from input files and
1420 store in pandas' Series.
1422 :param repeat: Repeat the download specified number of times if not
1427 logging.info(u"Downloading and parsing input files ...")
# Sequentially process every configured job/build.
1429 for job, builds in self._cfg.builds.items():
1430 for build in builds:
1432 result = self._download_and_parse_build(job, build, repeat)
1433 build_nr = result[u"build"][u"build"]
1436 data = result[u"data"]
# Re-pack the parsed dicts as pandas Series keyed by the dict keys.
1437 build_data = pd.Series({
1438 u"metadata": pd.Series(
1439 list(data[u"metadata"].values()),
1440 index=list(data[u"metadata"].keys())
1442 u"suites": pd.Series(
1443 list(data[u"suites"].values()),
1444 index=list(data[u"suites"].keys())
1446 u"tests": pd.Series(
1447 list(data[u"tests"].values()),
1448 index=list(data[u"tests"].keys())
# Builds are stored under str(build_nr) -- accessors must use str(build).
1452 if self._input_data.get(job, None) is None:
1453 self._input_data[job] = pd.Series()
1454 self._input_data[job][str(build_nr)] = build_data
# Record the file location and processing state back into the config.
1456 self._cfg.set_input_file_name(
1457 job, build_nr, result[u"build"][u"file-name"])
1459 self._cfg.set_input_state(job, build_nr, result[u"state"])
# NOTE(review): ru_maxrss is kilobytes on Linux (so /1000 approximates MB)
# but bytes on macOS -- confirm the target platform.
1462 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1463 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1465 logging.info(u"Done.")
1468 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1469 """Return the index of character in the string which is the end of tag.
1471 :param tag_filter: The string where the end of tag is being searched.
1472 :param start: The index where the searching is stated.
1473 :param closer: The character which is the tag closer.
1474 :type tag_filter: str
1477 :returns: The index of the tag closer.
1482 idx_opener = tag_filter.index(closer, start)
1483 return tag_filter.index(closer, idx_opener + 1)
# NOTE(review): non-contiguous extract; the loop header, its termination test
# and the return statement are not visible. Code lines kept byte-identical;
# comments only.
1488 def _condition(tag_filter):
1489 """Create a conditional statement from the given tag filter.
1491 :param tag_filter: Filter based on tags from the element specification.
1492 :type tag_filter: str
1493 :returns: Conditional statement which can be evaluated.
# Walk the filter string tag by tag via _end_of_tag() ...
1499 index = InputData._end_of_tag(tag_filter, index)
# ... and splice u" in tags" after each quoted tag, so the filter becomes a
# Python boolean expression usable with eval() in filter_data().
1503 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
# NOTE(review): non-contiguous extract; several statements (the try opener,
# the data_dict assignment, the initialisation of the output `data` Series,
# the return statements) are not visible. Code lines kept byte-identical;
# docstring/comments only improved.
1505 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1506 continue_on_error=False):
1507 """Filter required data from the given jobs and builds.
1509 The output data structure is:
1513 - test (or suite) 1 ID:
1519 - test (or suite) n ID:
1526 :param element: Element which will use the filtered data.
1527 :param params: Parameters which will be included in the output. If None,
1528 all parameters are included.
1529 :param data: If not None, this data is used instead of data specified
1531 :param data_set: The set of data to be filtered: tests, suites,
1533 :param continue_on_error: Continue if there is error while reading the
1534 data. The Item will be empty then
1535 :type element: pandas.Series
1539 :type continue_on_error: bool
1540 :returns: Filtered data.
1541 :rtype: pandas.Series
# Special cases: the "suites" data set and catch-all filters bypass the
# tag-expression machinery below.
1545 if data_set == "suites":
1547 elif element[u"filter"] in (u"all", u"template"):
1550 cond = InputData._condition(element[u"filter"])
1551 logging.debug(f"   Filter: {cond}")
1553 logging.error(u"   No filter defined.")
1557 params = element.get(u"parameters", None)
# u"type" is always needed downstream, so force-include it.
1559 params.append(u"type")
# Fall back to the element's own data specification when no explicit data is
# passed in. NOTE(review): the `data` parameter is re-used below as the
# output accumulator -- confusing shadowing, kept as-is.
1561 data_to_filter = data if data else element[u"data"]
1564 for job, builds in data_to_filter.items():
1565 data[job] = pd.Series()
1566 for build in builds:
1567 data[job][str(build)] = pd.Series()
1570 self.data[job][str(build)][data_set].items())
1572 if continue_on_error:
# Evaluate the generated tag condition against each test's tags.
# SECURITY(review): eval() on a string derived from the element
# specification -- acceptable only because specs are trusted input.
1576 for test_id, test_data in data_dict.items():
1577 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1578 data[job][str(build)][test_id] = pd.Series()
1580 for param, val in test_data.items():
1581 data[job][str(build)][test_id][param] = val
1583 for param in params:
1585 data[job][str(build)][test_id][param] =\
1588 data[job][str(build)][test_id][param] =\
1592 except (KeyError, IndexError, ValueError) as err:
1594 f"Missing mandatory parameter in the element specification: "
1598 except AttributeError as err:
1599 logging.error(repr(err))
# SyntaxError from eval() means the tag filter expression is malformed.
1601 except SyntaxError as err:
1603 f"The filter {cond} is not correct. Check if all tags are "
1604 f"enclosed by apostrophes.\n{repr(err)}"
# NOTE(review): non-contiguous extract; the try opener, the initialisation of
# the output `data` Series and the return statements are not visible. Code
# lines kept byte-identical; docstring/comments only improved.
1608 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1609 continue_on_error=False):
1610 """Filter required data from the given jobs and builds.
1612 The output data structure is:
1616 - test (or suite) 1 ID:
1622 - test (or suite) n ID:
1629 :param element: Element which will use the filtered data.
1630 :param params: Parameters which will be included in the output. If None,
1631 all parameters are included.
1632 :param data_set: The set of data to be filtered: tests, suites,
1634 :param continue_on_error: Continue if there is error while reading the
1635 data. The Item will be empty then
1636 :type element: pandas.Series
1639 :type continue_on_error: bool
1640 :returns: Filtered data.
1641 :rtype: pandas.Series
1644 include = element.get(u"include", None)
1646 logging.warning(u"No tests to include, skipping the element.")
1650 params = element.get(u"parameters", None)
1652 params.append(u"type")
1656 for job, builds in element[u"data"].items():
1657 data[job] = pd.Series()
1658 for build in builds:
1659 data[job][str(build)] = pd.Series()
# Each u"include" entry is treated as a (lower-cased) regular expression
# matched against the lower-cased test ID.
1660 for test in include:
1662 reg_ex = re.compile(str(test).lower())
1663 for test_id in self.data[job][
1664 str(build)][data_set].keys():
1665 if re.match(reg_ex, str(test_id).lower()):
1666 test_data = self.data[job][
1667 str(build)][data_set][test_id]
1668 data[job][str(build)][test_id] = pd.Series()
1670 for param, val in test_data.items():
1671 data[job][str(build)][test_id]\
# When explicit params are requested, missing ones become u"No Data".
1674 for param in params:
1676 data[job][str(build)][
1680 data[job][str(build)][
1681 test_id][param] = u"No Data"
1682 except KeyError as err:
1683 logging.error(repr(err))
1684 if continue_on_error:
1689 except (KeyError, IndexError, ValueError) as err:
1691 f"Missing mandatory parameter in the element "
1692 f"specification: {repr(err)}"
1695 except AttributeError as err:
1696 logging.error(repr(err))
def merge_data(data):
    """Merge data from more jobs and builds to a simple data structure.

    The output data structure is:

    - test (suite) 1 ID:
      - param 1
      ...
      - param n
    ...
    - test (suite) n ID:
    ...

    :param data: Data to merge.
    :type data: pandas.Series
    :returns: Merged data.
    :rtype: pandas.Series
    """
    logging.info(u"    Merging data ...")

    merged_data = pd.Series()
    # Flatten the job -> build -> item hierarchy into one Series keyed by
    # item ID; on duplicate IDs, the last visited build wins.
    for builds in data.values:
        for item in builds.values:
            for item_id, item_data in item.items():
                merged_data[item_id] = item_data
    # NOTE(review): the extract lost formatting and the trailing return;
    # restored so the merged Series actually reaches the caller.
    return merged_data
# NOTE(review): non-contiguous extract; the first table-header entries, the
# row-building loop and the `avg` accumulation are not visible. Code lines
# kept byte-identical; comments only.
1730 def print_all_oper_data(self):
1731 """Print all operational data to console.
# Visible tail of the PrettyTable header list (tbl_hdr).
1739 u"Cycles per Packet",
1740 u"Average Vector Size"
1743 for job in self._input_data.values:
1744 for build in job.values:
1745 for test_id, test_data in build[u"tests"].items():
# Only tests that captured u"show-run" operational data are printed.
1747 if test_data.get(u"show-run", None) is None:
1749 for dut_name, data in test_data[u"show-run"].items():
1750 if data.get(u"threads", None) is None:
1752 print(f"Host IP: {data.get(u'host', '')}, "
1753 f"Socket: {data.get(u'socket', '')}")
# One table per thread; thread 0 is labelled u"main" below.
1754 for thread_nr, thread in data[u"threads"].items():
1755 txt_table = prettytable.PrettyTable(tbl_hdr)
1758 txt_table.add_row(row)
# Guard against dividing by zero for threads with no rows (the division by
# len(thread) happens two lines below).
1760 if len(thread) == 0:
1763 avg = f", Average Vector Size per Node: " \
1764 f"{(avg / len(thread)):.2f}"
1765 th_name = u"main" if thread_nr == 0 \
1766 else f"worker_{thread_nr}"
1767 print(f"{dut_name}, {th_name}{avg}")
1768 txt_table.float_format = u".2"
1769 txt_table.align = u"r"
1770 txt_table.align[u"Name"] = u"l"
1771 print(f"{txt_table.get_string()}\n")