1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
  pandas' DataFrames,
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
32 from json.decoder import JSONDecodeError
39 from robot.api import ExecutionResult, ResultVisitor
40 from robot import errors
42 from resources.libraries.python import jumpavg
43 from input_data_files import download_and_unzip_data_file
46 # Separator used in file names
# NOTE(review): fragmentary listing — original line numbers are embedded at the
# start of each line, many source lines are missing and indentation is lost.
# Code is left byte-identical; only review comments are added.
# Visitor over a Robot Framework result tree; collects per-suite/per-test data
# into a JSON-like dict (see the class docstring fragments below).
50 class ExecutionChecker(ResultVisitor):
51 """Class to traverse through the test suite structure.
53 The functionality implemented in this class generates a json structure:
59 "generated": "Timestamp",
60 "version": "SUT version",
61 "job": "Jenkins job name",
62 "build": "Information about the build"
65 "Suite long name 1": {
67 "doc": "Suite 1 documentation",
68 "parent": "Suite 1 parent",
69 "level": "Level of the suite in the suite hierarchy"
71 "Suite long name N": {
73 "doc": "Suite N documentation",
74 "parent": "Suite 2 parent",
75 "level": "Level of the suite in the suite hierarchy"
82 "parent": "Name of the parent of the test",
83 "doc": "Test documentation",
84 "msg": "Test message",
85 "conf-history": "DUT1 and DUT2 VAT History",
86 "show-run": "Show Run",
87 "tags": ["tag 1", "tag 2", "tag n"],
89 "status": "PASS" | "FAIL",
135 "parent": "Name of the parent of the test",
136 "doc": "Test documentation",
137 "msg": "Test message",
138 "tags": ["tag 1", "tag 2", "tag n"],
140 "status": "PASS" | "FAIL",
147 "parent": "Name of the parent of the test",
148 "doc": "Test documentation",
149 "msg": "Test message",
150 "tags": ["tag 1", "tag 2", "tag n"],
151 "type": "MRR" | "BMRR",
152 "status": "PASS" | "FAIL",
154 "receive-rate": float,
155 # Average of a list, computed using AvgStdevStats.
156 # In CSIT-1180, replace with List[float].
170 "metadata": { # Optional
171 "version": "VPP version",
172 "job": "Jenkins job name",
173 "build": "Information about the build"
177 "doc": "Suite 1 documentation",
178 "parent": "Suite 1 parent",
179 "level": "Level of the suite in the suite hierarchy"
182 "doc": "Suite N documentation",
183 "parent": "Suite 2 parent",
184 "level": "Level of the suite in the suite hierarchy"
190 "parent": "Name of the parent of the test",
191 "doc": "Test documentation"
192 "msg": "Test message"
193 "tags": ["tag 1", "tag 2", "tag n"],
194 "conf-history": "DUT1 and DUT2 VAT History"
195 "show-run": "Show Run"
196 "status": "PASS" | "FAIL"
204 .. note:: ID is the lowercase full path to the test.
# Class-level compiled regexes used to pull values out of robot test messages.
# NOTE(review): in r'\d+.\d+' the '.' is unescaped, so it matches ANY character,
# not only a literal dot — presumably intended as r'\d+\.\d+'; TODO confirm
# against the original file before changing (patterns are matched leniently on
# purpose in some CSIT code).
207 REGEX_PLR_RATE = re.compile(
208 r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
209 r'PLRsearch upper bound::?\s(\d+.\d+)'
211 REGEX_NDRPDR_RATE = re.compile(
212 r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
213 r'NDR_UPPER:\s(\d+.\d+).*\n'
214 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
215 r'PDR_UPPER:\s(\d+.\d+)'
217 REGEX_PERF_MSG_INFO = re.compile(
218 r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
219 r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
220 r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
221 r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
222 r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
224 REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
226 # TODO: Remove when not needed
227 REGEX_NDRPDR_LAT_BASE = re.compile(
228 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
229 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
231 REGEX_NDRPDR_LAT = re.compile(
232 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
233 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
234 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
235 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
236 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
237 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
239 # TODO: Remove when not needed
240 REGEX_NDRPDR_LAT_LONG = re.compile(
241 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
242 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
243 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
244 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
245 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
246 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
247 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
248 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
249 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
251 REGEX_VERSION_VPP = re.compile(
252 r"(return STDOUT Version:\s*|"
253 r"VPP Version:\s*|VPP version:\s*)(.*)"
255 REGEX_VERSION_DPDK = re.compile(
256 r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
258 REGEX_TCP = re.compile(
259 r'Total\s(rps|cps|throughput):\s(\d*).*$'
261 REGEX_MRR = re.compile(
262 r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
263 r'tx\s(\d*),\srx\s(\d*)'
265 REGEX_BMRR = re.compile(
266 r'Maximum Receive Rate trial results'
267 r' in packets per second: \[(.*)\]'
269 REGEX_RECONF_LOSS = re.compile(
270 r'Packets lost due to reconfig: (\d*)'
272 REGEX_RECONF_TIME = re.compile(
273 r'Implied time lost: (\d*.[\de-]*)'
# Test-case naming helpers: thread/core tag (e.g. 2t1c), old/new name forms,
# and the leading tcNN- test number.
275 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
277 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
279 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
281 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
283 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
# NOTE(review): fragmentary listing — some lines are missing and indentation is
# lost; code left byte-identical, comments only.
# Initializes parser state, seeds the result dict with caller-supplied
# metadata, and builds the message-type -> parser-method dispatch table.
285 def __init__(self, metadata, mapping, ignore):
288 :param metadata: Key-value pairs to be included in "metadata" part of
290 :param mapping: Mapping of the old names of test cases to the new
292 :param ignore: List of TCs to be ignored.
298 # Type of message to parse out from the test messages
299 self._msg_type = None
305 self._timestamp = None
307 # Testbed. The testbed is identified by TG node IP address.
310 # Mapping of TCs long names
311 self._mapping = mapping
314 self._ignore = ignore
316 # Number of PAPI History messages found:
318 # 1 - PAPI History of DUT1
319 # 2 - PAPI History of DUT2
320 self._conf_history_lookup_nr = 0
322 self._sh_run_counter = 0
324 # Test ID of currently processed test- the lowercase full path to the
328 # The main data structure
330 u"metadata": OrderedDict(),
331 u"suites": OrderedDict(),
332 u"tests": OrderedDict()
335 # Save the provided metadata
336 for key, val in metadata.items():
337 self._data[u"metadata"][key] = val
339 # Dictionary defining the methods used to parse different types of
# NOTE(review): keys here must match the self._msg_type values set by the
# start_*_kw callbacks further down in this class.
342 u"timestamp": self._get_timestamp,
343 u"vpp-version": self._get_vpp_version,
344 u"dpdk-version": self._get_dpdk_version,
345 # TODO: Remove when not needed:
346 u"teardown-vat-history": self._get_vat_history,
347 u"teardown-papi-history": self._get_papi_history,
348 u"test-show-runtime": self._get_show_run,
349 u"testbed": self._get_testbed
# Fragment of the `data` read-only property docstring (its `def`/@property
# lines are among the missing lines).
354 """Getter - Data parsed from the XML file.
356 :returns: Data parsed from the XML file.
# NOTE(review): fragmentary listing — the try/for lines are missing; code left
# byte-identical, comments only.
# Parses the MRR trial-results list out of the test message and reformats each
# sample as millions (value / 1e6, 2 decimals); returns "Test Failed." on any
# parsing problem.
361 def _get_data_from_mrr_test_msg(self, msg):
362 """Get info from message of MRR performance tests.
364 :param msg: Message to be processed.
366 :returns: Processed message or original message if a problem occurs.
370 groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
371 if not groups or groups.lastindex != 1:
372 return u"Test Failed."
375 data = groups.group(1).split(u", ")
376 except (AttributeError, IndexError, ValueError, KeyError):
377 return u"Test Failed."
382 out_str += f"{(float(item) / 1e6):.2f}, "
# Strip the trailing ", " added by the loop above and close the bracket.
383 return out_str[:-2] + u"]"
384 except (AttributeError, IndexError, ValueError, KeyError):
385 return u"Test Failed."
# NOTE(review): fragmentary listing — several lines are missing; code left
# byte-identical, comments only.
# Parses NDR/PDR throughput and latency figures out of an NDRPDR test message
# and renders them as a human-readable table; "Test Failed." on any problem.
387 def _get_data_from_perf_test_msg(self, msg):
388 """Get info from message of NDRPDR performance tests.
390 :param msg: Message to be processed.
392 :returns: Processed message or original message if a problem occurs.
396 groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
397 if not groups or groups.lastindex != 10:
398 return u"Test Failed."
402 u"ndr_low": float(groups.group(1)),
403 u"ndr_low_b": float(groups.group(2)),
404 u"pdr_low": float(groups.group(3)),
405 u"pdr_low_b": float(groups.group(4)),
406 u"pdr_lat_90_1": groups.group(5),
407 u"pdr_lat_90_2": groups.group(6),
408 u"pdr_lat_50_1": groups.group(7),
409 u"pdr_lat_50_2": groups.group(8),
410 u"pdr_lat_10_1": groups.group(9),
411 u"pdr_lat_10_2": groups.group(10),
413 except (AttributeError, IndexError, ValueError, KeyError):
414 return u"Test Failed."
# Nested helper: decodes the two per-direction "min/avg/max/hdrh" latency
# strings and reports 50/90/99th percentiles from the embedded HdrHistogram.
416 def _process_lat(in_str_1, in_str_2):
417 """Extract min, avg, max values from latency string.
419 :param in_str_1: Latency string for one direction produced by robot
421 :param in_str_2: Latency string for second direction produced by
425 :returns: Processed latency string or None if a problem occurs.
428 in_list_1 = in_str_1.split('/', 3)
429 in_list_2 = in_str_2.split('/', 3)
431 if len(in_list_1) != 4 and len(in_list_2) != 4:
# NOTE(review): padding with "=" * (len % 4) only restores valid base64 when
# len % 4 == 0; correct padding count is (-len) % 4 — TODO confirm upstream.
434 in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
436 hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
437 except hdrh.codec.HdrLengthException:
440 in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
442 hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
443 except hdrh.codec.HdrLengthException:
446 if hdr_lat_1 and hdr_lat_2:
448 hdr_lat_1.get_value_at_percentile(50.0),
449 hdr_lat_1.get_value_at_percentile(90.0),
450 hdr_lat_1.get_value_at_percentile(99.0),
451 hdr_lat_2.get_value_at_percentile(50.0),
452 hdr_lat_2.get_value_at_percentile(90.0),
453 hdr_lat_2.get_value_at_percentile(99.0)
463 f"1. {(data[u'ndr_low'] / 1e6):5.2f} "
464 f"{data[u'ndr_low_b']:5.2f}"
465 f"\n2. {(data[u'pdr_low'] / 1e6):5.2f} "
466 f"{data[u'pdr_low_b']:5.2f}"
469 _process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
470 _process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
471 _process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
# max_len computes the column width for the latency table below.
474 max_len = len(str(max((max(item) for item in latency))))
475 max_len = 4 if max_len < 4 else max_len
477 for idx, lat in enumerate(latency):
# NOTE(review): the literal width "68,080" in these format specs looks like
# extraction corruption of a computed width (probably f"{lat[i]:{max_len},d}",
# given max_len is computed above) — verify against the original file.
482 f"{lat[0]:68,080d} "
483 f"{lat[1]:68,080d} "
484 f"{lat[2]:68,080d} "
485 f"{lat[3]:68,080d} "
486 f"{lat[4]:68,080d} "
487 f"{lat[5]:68,080d} "
492 except (AttributeError, IndexError, ValueError, KeyError):
493 return u"Test Failed."
# NOTE(review): fragmentary listing — code left byte-identical, comments only.
# Metadata extractors: each one inspects a single robot message, stores what it
# finds under self._data["metadata"], and clears self._msg_type so the message
# dispatch (parse_msg in __init__) stops routing further messages here.
495 def _get_testbed(self, msg):
496 """Called when extraction of testbed IP is required.
497 The testbed is identified by TG node IP address.
499 :param msg: Message to process.
504 if msg.message.count(u"Setup of TG node") or \
505 msg.message.count(u"Setup of node TG host"):
# NOTE(review): dots in the IPv4 pattern are unescaped (match any char) —
# lenient but presumably intended as literal dots; TODO confirm.
506 reg_tg_ip = re.compile(
507 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
509 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
510 except (KeyError, ValueError, IndexError, AttributeError):
513 self._data[u"metadata"][u"testbed"] = self._testbed
514 self._msg_type = None
# Extracts the VPP version string via REGEX_VERSION_VPP.
516 def _get_vpp_version(self, msg):
517 """Called when extraction of VPP version is required.
519 :param msg: Message to process.
524 if msg.message.count(u"return STDOUT Version:") or \
525 msg.message.count(u"VPP Version:") or \
526 msg.message.count(u"VPP version:"):
527 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
529 self._data[u"metadata"][u"version"] = self._version
530 self._msg_type = None
# Extracts the DPDK version string via REGEX_VERSION_DPDK.
532 def _get_dpdk_version(self, msg):
533 """Called when extraction of DPDK version is required.
535 :param msg: Message to process.
540 if msg.message.count(u"DPDK Version:"):
542 self._version = str(re.search(
543 self.REGEX_VERSION_DPDK, msg.message).group(2))
544 self._data[u"metadata"][u"version"] = self._version
548 self._msg_type = None
# Records the first message's timestamp (truncated to 14 chars) as "generated".
550 def _get_timestamp(self, msg):
551 """Called when extraction of timestamp is required.
553 :param msg: Message to process.
558 self._timestamp = msg.timestamp[:14]
559 self._data[u"metadata"][u"generated"] = self._timestamp
560 self._msg_type = None
# NOTE(review): fragmentary listing — code left byte-identical, comments only.
# _get_vat_history and _get_papi_history are near-duplicates differing only in
# the "VAT"/"PAPI" marker text; a shared helper would remove the duplication
# (left as-is here because intermediate lines are missing from this view).
562 def _get_vat_history(self, msg):
563 """Called when extraction of VAT command history is required.
565 TODO: Remove when not needed.
567 :param msg: Message to process.
571 if msg.message.count(u"VAT command history:"):
# Counter doubles as the DUT number (1 = DUT1, 2 = DUT2, ...).
572 self._conf_history_lookup_nr += 1
573 if self._conf_history_lookup_nr == 1:
574 self._data[u"tests"][self._test_id][u"conf-history"] = str()
576 self._msg_type = None
# Strip the leading "<ip> VAT command history:" prefix, then convert newlines
# to the " |br| " markup used by the report generator.
577 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
578 r"VAT command history:", u"",
579 msg.message, count=1).replace(u'\n', u' |br| ').\
582 self._data[u"tests"][self._test_id][u"conf-history"] += (
583 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
586 def _get_papi_history(self, msg):
587 """Called when extraction of PAPI command history is required.
589 :param msg: Message to process.
593 if msg.message.count(u"PAPI command history:"):
594 self._conf_history_lookup_nr += 1
595 if self._conf_history_lookup_nr == 1:
596 self._data[u"tests"][self._test_id][u"conf-history"] = str()
598 self._msg_type = None
599 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
600 r"PAPI command history:", u"",
601 msg.message, count=1).replace(u'\n', u' |br| ').\
603 self._data[u"tests"][self._test_id][u"conf-history"] += (
604 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
# NOTE(review): fragmentary listing — code left byte-identical, comments only.
# Parses the "show runtime" output (arriving as a python-repr-ish string in the
# message), computes per-thread clocks/vector and vectors/call ratios, and
# stores the table under tests[<id>]["show-run"][DUTn].
607 def _get_show_run(self, msg):
608 """Called when extraction of VPP operational data (output of CLI command
609 Show Runtime) is required.
611 :param msg: Message to process.
616 if not msg.message.count(u"stats runtime"):
# Only the first show-run per test is recorded.
620 if self._sh_run_counter > 1:
623 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
624 self._data[u"tests"][self._test_id][u"show-run"] = dict()
626 groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
630 host = groups.group(1)
631 except (AttributeError, IndexError):
634 sock = groups.group(2)
635 except (AttributeError, IndexError):
# NOTE(review): converting a repr-like payload to JSON by chained .replace()
# is fragile (breaks if values contain spaces/quotes) — presumably acceptable
# for this fixed producer; verify before reuse.
638 runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
639 replace(u"'", u'"').replace(u'b"', u'"').
640 replace(u'u"', u'"').split(u":", 1)[1])
643 threads_nr = len(runtime[0][u"clocks"])
644 except (IndexError, KeyError):
# DUT number = how many DUTs were already recorded for this test, plus one.
647 dut = u"DUT{nr}".format(
648 nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
653 u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
657 for idx in range(threads_nr):
# clocks per unit of work: prefer vectors, then calls, then suspends.
658 if item[u"vectors"][idx] > 0:
659 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
660 elif item[u"calls"][idx] > 0:
661 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
662 elif item[u"suspends"][idx] > 0:
663 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
667 if item[u"calls"][idx] > 0:
668 vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
# Only record graph nodes that did any work on this thread.
672 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
673 int(item[u"suspends"][idx]):
674 oper[u"threads"][idx].append([
677 item[u"vectors"][idx],
678 item[u"suspends"][idx],
683 self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
# NOTE(review): fragmentary listing — code left byte-identical, comments only.
# Both extractors below default their results to -1.0 and flip status to PASS
# only when the regex matched and all groups converted to float.
685 def _get_ndrpdr_throughput(self, msg):
686 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
689 :param msg: The test message to be parsed.
691 :returns: Parsed data as a dict and the status (PASS/FAIL).
692 :rtype: tuple(dict, str)
696 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
697 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
700 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
702 if groups is not None:
704 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
705 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
706 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
707 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
709 except (IndexError, ValueError):
712 return throughput, status
# Same pattern for PLRsearch (soak test) lower/upper bounds.
714 def _get_plr_throughput(self, msg):
715 """Get PLRsearch lower bound and PLRsearch upper bound from the test
718 :param msg: The test message to be parsed.
720 :returns: Parsed data as a dict and the status (PASS/FAIL).
721 :rtype: tuple(dict, str)
729 groups = re.search(self.REGEX_PLR_RATE, msg)
731 if groups is not None:
733 throughput[u"LOWER"] = float(groups.group(1))
734 throughput[u"UPPER"] = float(groups.group(2))
736 except (IndexError, ValueError):
739 return throughput, status
# NOTE(review): fragmentary listing — code left byte-identical, comments only.
# Parses latency tables from the test message, trying three message formats in
# order: LONG (18 group pairs), current (12), then BASE (4). Returns
# (latency dict, "PASS"/"FAIL").
741 def _get_ndrpdr_latency(self, msg):
742 """Get LATENCY from the test message.
744 :param msg: The test message to be parsed.
746 :returns: Parsed data as a dict and the status (PASS/FAIL).
747 :rtype: tuple(dict, str)
# Pre-populate every bucket with copies of the default record so a partial
# match still yields a fully-shaped dict.
757 u"direction1": copy.copy(latency_default),
758 u"direction2": copy.copy(latency_default)
761 u"direction1": copy.copy(latency_default),
762 u"direction2": copy.copy(latency_default)
765 u"direction1": copy.copy(latency_default),
766 u"direction2": copy.copy(latency_default)
769 u"direction1": copy.copy(latency_default),
770 u"direction2": copy.copy(latency_default)
773 u"direction1": copy.copy(latency_default),
774 u"direction2": copy.copy(latency_default)
777 u"direction1": copy.copy(latency_default),
778 u"direction2": copy.copy(latency_default)
782 # TODO: Rewrite when long and base are not needed
783 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
785 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
787 groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
789 return latency, u"FAIL"
# Nested helper: "min/avg/max[/hdrh]" string -> dict of floats (+ raw hdrh).
791 def process_latency(in_str):
792 """Return object with parsed latency values.
794 TODO: Define class for the return type.
796 :param in_str: Input string, min/avg/max/hdrh format.
798 :returns: Dict with corresponding keys, except hdrh float values.
800 :throws IndexError: If in_str does not have enough substrings.
801 :throws ValueError: If a substring does not convert to float.
803 in_list = in_str.split('/', 3)
806 u"min": float(in_list[0]),
807 u"avg": float(in_list[1]),
808 u"max": float(in_list[2]),
812 if len(in_list) == 4:
813 rval[u"hdrh"] = str(in_list[3])
# First four groups are common to all three formats.
818 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
819 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
820 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
821 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
822 if groups.lastindex == 4:
823 return latency, u"PASS"
824 except (IndexError, ValueError):
# Current format: PDR90/PDR50/PDR10 and LAT0, 12 groups total.
828 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
829 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
830 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
831 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
832 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
833 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
834 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
835 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
836 if groups.lastindex == 12:
837 return latency, u"PASS"
838 except (IndexError, ValueError):
841 # TODO: Remove when not needed
# Legacy LONG format adds NDR10/NDR50/NDR90 buckets (18 groups total).
842 latency[u"NDR10"] = {
843 u"direction1": copy.copy(latency_default),
844 u"direction2": copy.copy(latency_default)
846 latency[u"NDR50"] = {
847 u"direction1": copy.copy(latency_default),
848 u"direction2": copy.copy(latency_default)
850 latency[u"NDR90"] = {
851 u"direction1": copy.copy(latency_default),
852 u"direction2": copy.copy(latency_default)
855 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
856 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
857 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
858 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
859 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
860 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
861 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
862 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
863 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
864 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
865 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
866 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
867 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
868 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
869 return latency, u"PASS"
870 except (IndexError, ValueError):
873 return latency, u"FAIL"
# NOTE(review): fragmentary listing — code left byte-identical, comments only.
# Static helper (no self in the signature): decodes hoststack test results from
# the message as JSON. LDPRELOAD tests carry one JSON object; VPPECHO tests
# carry two concatenated objects (client, server).
876 def _get_hoststack_data(msg, tags):
877 """Get data from the hoststack test message.
879 :param msg: The test message to be parsed.
880 :param tags: Test tags.
883 :returns: Parsed data as a JSON dict and the status (PASS/FAIL).
884 :rtype: tuple(dict, str)
# Normalize repr-style quoting to JSON quoting before loads().
889 msg = msg.replace(u"'", u'"').replace(u" ", u"")
890 if u"LDPRELOAD" in tags:
894 except JSONDecodeError:
896 elif u"VPPECHO" in tags:
# Re-split "}{"-joined client/server objects so each parses separately.
898 msg_lst = msg.replace(u"}{", u"} {").split(u" ")
900 client=loads(msg_lst[0]),
901 server=loads(msg_lst[1])
904 except (JSONDecodeError, IndexError):
907 return result, status
# NOTE(review): fragmentary listing — code left byte-identical, comments only.
# Robot ResultVisitor overrides for suites and tests.
909 def visit_suite(self, suite):
910 """Implements traversing through the suite and its direct children.
912 :param suite: Suite to process.
916 if self.start_suite(suite) is not False:
917 suite.suites.visit(self)
918 suite.tests.visit(self)
919 self.end_suite(suite)
# Records suite name/doc/parent/level under self._data["suites"], keyed by the
# lowercase, underscored long name, then visits the suite's keywords.
921 def start_suite(self, suite):
922 """Called when suite starts.
924 :param suite: Suite to process.
930 parent_name = suite.parent.name
931 except AttributeError:
# Normalize the doc text into the " |br| " report markup.
934 doc_str = suite.doc.\
935 replace(u'"', u"'").\
936 replace(u'\n', u' ').\
937 replace(u'\r', u'').\
938 replace(u'*[', u' |br| *[').\
939 replace(u"*", u"**").\
940 replace(u' |br| *[', u'*[', 1)
942 self._data[u"suites"][suite.longname.lower().
944 replace(u" ", u"_")] = {
945 u"name": suite.name.lower(),
947 u"parent": parent_name,
948 u"level": len(suite.longname.split(u"."))
951 suite.keywords.visit(self)
953 def end_suite(self, suite):
954 """Called when suite ends.
956 :param suite: Suite to process.
961 def visit_test(self, test):
962 """Implements traversing through the test.
964 :param test: Test to process.
968 if self.start_test(test) is not False:
969 test.keywords.visit(self)
# NOTE(review): fragmentary listing — code left byte-identical, comments only.
# The workhorse: builds the per-test result record (name, parent, doc, tags,
# msg, type-specific results) and stores it under self._data["tests"].
972 def start_test(self, test):
973 """Called when test starts.
975 :param test: Test to process.
980 self._sh_run_counter = 0
982 longname_orig = test.longname.lower()
984 # Check the ignore list
985 if longname_orig in self._ignore:
988 tags = [str(tag) for tag in test.tags]
991 # Change the TC long name and name if defined in the mapping table
992 longname = self._mapping.get(longname_orig, None)
993 if longname is not None:
994 name = longname.split(u'.')[-1]
996 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
1000 longname = longname_orig
1001 name = test.name.lower()
1003 # Remove TC number from the TC long name (backward compatibility):
1004 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
1005 # Remove TC number from the TC name (not needed):
1006 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
1008 test_result[u"parent"] = test.parent.name.lower()
1009 test_result[u"tags"] = tags
1010 test_result["doc"] = test.doc.\
1011 replace(u'"', u"'").\
1012 replace(u'\n', u' ').\
1013 replace(u'\r', u'').\
1014 replace(u'[', u' |br| [').\
1015 replace(u' |br| [', u'[', 1)
1016 test_result[u"type"] = u"FUNC"
1017 test_result[u"status"] = test.status
# Pretty-print the message for passing perf tests; raw message otherwise.
1019 if test.status == u"PASS":
1020 if u"NDRPDR" in tags:
1021 test_result[u"msg"] = self._get_data_from_perf_test_msg(
1022 test.message).replace(u'\n', u' |br| ').\
1023 replace(u'\r', u'').replace(u'"', u"'")
1024 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1025 test_result[u"msg"] = self._get_data_from_mrr_test_msg(
1026 test.message).replace(u'\n', u' |br| ').\
1027 replace(u'\r', u'').replace(u'"', u"'")
1029 test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
1030 replace(u'\r', u'').replace(u'"', u"'")
1032 test_result[u"msg"] = u"Test Failed."
1034 if u"PERFTEST" in tags:
1035 # Replace info about cores (e.g. -1c-) with the info about threads
1036 # and cores (e.g. -1t1c-) in the long test case names and in the
1037 # test case names if necessary.
1038 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
1042 for tag in test_result[u"tags"]:
1043 groups = re.search(self.REGEX_TC_TAG, tag)
1049 self._test_id = re.sub(
1050 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1051 self._test_id, count=1
1053 test_result[u"name"] = re.sub(
1054 self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
1055 test_result["name"], count=1
# A test without exactly one NtMc tag cannot be renamed — mark it FAIL.
1058 test_result[u"status"] = u"FAIL"
1059 self._data[u"tests"][self._test_id] = test_result
1061 f"The test {self._test_id} has no or more than one "
1062 f"multi-threading tags.\n"
1063 f"Tags: {test_result[u'tags']}"
# Type-specific result extraction, dispatched on tags.
1067 if test.status == u"PASS":
1068 if u"NDRPDR" in tags:
1069 test_result[u"type"] = u"NDRPDR"
1070 test_result[u"throughput"], test_result[u"status"] = \
1071 self._get_ndrpdr_throughput(test.message)
1072 test_result[u"latency"], test_result[u"status"] = \
1073 self._get_ndrpdr_latency(test.message)
1074 elif u"SOAK" in tags:
1075 test_result[u"type"] = u"SOAK"
1076 test_result[u"throughput"], test_result[u"status"] = \
1077 self._get_plr_throughput(test.message)
1078 elif u"HOSTSTACK" in tags:
1079 test_result[u"type"] = u"HOSTSTACK"
1080 test_result[u"result"], test_result[u"status"] = \
1081 self._get_hoststack_data(test.message, tags)
1082 elif u"TCP" in tags:
1083 test_result[u"type"] = u"TCP"
1084 groups = re.search(self.REGEX_TCP, test.message)
1085 test_result[u"result"] = int(groups.group(2))
1086 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
1088 test_result[u"type"] = u"MRR"
1090 test_result[u"type"] = u"BMRR"
1092 test_result[u"result"] = dict()
1093 groups = re.search(self.REGEX_BMRR, test.message)
1094 if groups is not None:
1095 items_str = groups.group(1)
1096 items_float = [float(item.strip()) for item
1097 in items_str.split(",")]
1098 # Use whole list in CSIT-1180.
1099 stats = jumpavg.AvgStdevStats.for_runs(items_float)
1100 test_result[u"result"][u"receive-rate"] = stats.avg
# Legacy MRR message: rate = rx / duration.
1102 groups = re.search(self.REGEX_MRR, test.message)
1103 test_result[u"result"][u"receive-rate"] = \
1104 float(groups.group(3)) / float(groups.group(1))
1105 elif u"RECONF" in tags:
1106 test_result[u"type"] = u"RECONF"
1107 test_result[u"result"] = None
1109 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
1110 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
1111 test_result[u"result"] = {
1112 u"loss": int(grps_loss.group(1)),
1113 u"time": float(grps_time.group(1))
1115 except (AttributeError, IndexError, ValueError, TypeError):
1116 test_result[u"status"] = u"FAIL"
1117 elif u"DEVICETEST" in tags:
1118 test_result[u"type"] = u"DEVICETEST"
1120 test_result[u"status"] = u"FAIL"
1121 self._data[u"tests"][self._test_id] = test_result
1124 self._data[u"tests"][self._test_id] = test_result
# NOTE(review): fragmentary listing — code left byte-identical, comments only.
# Remaining visitor callbacks: keywords are routed by type (setup / teardown /
# test), and the start_*_kw methods set self._msg_type so that the message
# visitor (start_message, bottom) dispatches to the right _get_* parser.
1126 def end_test(self, test):
1127 """Called when test ends.
1129 :param test: Test to process.
1134 def visit_keyword(self, keyword):
1135 """Implements traversing through the keyword and its child keywords.
1137 :param keyword: Keyword to process.
1138 :type keyword: Keyword
1141 if self.start_keyword(keyword) is not False:
1142 self.end_keyword(keyword)
1144 def start_keyword(self, keyword):
1145 """Called when keyword starts. Default implementation does nothing.
1147 :param keyword: Keyword to process.
1148 :type keyword: Keyword
1152 if keyword.type == u"setup":
1153 self.visit_setup_kw(keyword)
1154 elif keyword.type == u"teardown":
1155 self.visit_teardown_kw(keyword)
1157 self.visit_test_kw(keyword)
1158 except AttributeError:
1161 def end_keyword(self, keyword):
1162 """Called when keyword ends. Default implementation does nothing.
1164 :param keyword: Keyword to process.
1165 :type keyword: Keyword
1169 def visit_test_kw(self, test_kw):
1170 """Implements traversing through the test keyword and its child
1173 :param test_kw: Keyword to process.
1174 :type test_kw: Keyword
1177 for keyword in test_kw.keywords:
1178 if self.start_test_kw(keyword) is not False:
1179 self.visit_test_kw(keyword)
1180 self.end_test_kw(keyword)
1182 def start_test_kw(self, test_kw):
1183 """Called when test keyword starts. Default implementation does
1186 :param test_kw: Keyword to process.
1187 :type test_kw: Keyword
1190 if test_kw.name.count(u"Show Runtime On All Duts") or \
1191 test_kw.name.count(u"Show Runtime Counters On All Duts"):
1192 self._msg_type = u"test-show-runtime"
1193 self._sh_run_counter += 1
1194 elif test_kw.name.count(u"Install Dpdk Test On All Duts") and \
1196 self._msg_type = u"dpdk-version"
1199 test_kw.messages.visit(self)
1201 def end_test_kw(self, test_kw):
1202 """Called when keyword ends. Default implementation does nothing.
1204 :param test_kw: Keyword to process.
1205 :type test_kw: Keyword
1209 def visit_setup_kw(self, setup_kw):
1210 """Implements traversing through the teardown keyword and its child
1213 :param setup_kw: Keyword to process.
1214 :type setup_kw: Keyword
1217 for keyword in setup_kw.keywords:
1218 if self.start_setup_kw(keyword) is not False:
1219 self.visit_setup_kw(keyword)
1220 self.end_setup_kw(keyword)
1222 def start_setup_kw(self, setup_kw):
1223 """Called when teardown keyword starts. Default implementation does
1226 :param setup_kw: Keyword to process.
1227 :type setup_kw: Keyword
# Only parse version/timestamp/testbed once (guards on the cached attributes).
1230 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1231 and not self._version:
1232 self._msg_type = u"vpp-version"
1233 elif setup_kw.name.count(u"Set Global Variable") \
1234 and not self._timestamp:
1235 self._msg_type = u"timestamp"
1236 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1237 self._msg_type = u"testbed"
1240 setup_kw.messages.visit(self)
1242 def end_setup_kw(self, setup_kw):
1243 """Called when keyword ends. Default implementation does nothing.
1245 :param setup_kw: Keyword to process.
1246 :type setup_kw: Keyword
1250 def visit_teardown_kw(self, teardown_kw):
1251 """Implements traversing through the teardown keyword and its child
1254 :param teardown_kw: Keyword to process.
1255 :type teardown_kw: Keyword
1258 for keyword in teardown_kw.keywords:
1259 if self.start_teardown_kw(keyword) is not False:
1260 self.visit_teardown_kw(keyword)
1261 self.end_teardown_kw(keyword)
1263 def start_teardown_kw(self, teardown_kw):
1264 """Called when teardown keyword starts
1266 :param teardown_kw: Keyword to process.
1267 :type teardown_kw: Keyword
1271 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1272 # TODO: Remove when not needed:
1273 self._conf_history_lookup_nr = 0
1274 self._msg_type = u"teardown-vat-history"
1275 teardown_kw.messages.visit(self)
1276 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1277 self._conf_history_lookup_nr = 0
1278 self._msg_type = u"teardown-papi-history"
1279 teardown_kw.messages.visit(self)
1281 def end_teardown_kw(self, teardown_kw):
1282 """Called when keyword ends. Default implementation does nothing.
1284 :param teardown_kw: Keyword to process.
1285 :type teardown_kw: Keyword
1289 def visit_message(self, msg):
1290 """Implements visiting the message.
1292 :param msg: Message to process.
1296 if self.start_message(msg) is not False:
1297 self.end_message(msg)
1299 def start_message(self, msg):
1300 """Called when message starts. Get required information from messages:
1303 :param msg: Message to process.
# Dispatch to the parser registered for the current self._msg_type.
1309 self.parse_msg[self._msg_type](msg)
1311 def end_message(self, msg):
1312 """Called when message ends. Default implementation does nothing.
1314 :param msg: Message to process.
1323 The data is extracted from output.xml files generated by Jenkins jobs and
1324 stored in pandas' DataFrames.
1330 (as described in ExecutionChecker documentation)
1332 (as described in ExecutionChecker documentation)
1334 (as described in ExecutionChecker documentation)
# InputData constructor.  Only the spec-storage setup is visible here;
# lines assigning the configuration (presumably self._cfg = spec) fall in
# a listing gap — confirm against the full source.
# self._input_data: top-level pandas.Series keyed by job name; each job
# holds a Series keyed by build number (as str).
1337 def __init__(self, spec):
1340     :param spec: Specification.
1341     :type spec: Specification
1348     self._input_data = pd.Series()
# NOTE(review): the `@property` / `def data(self):` header of this getter
# falls in a listing gap; only the docstring body and return are visible.
1352 """Getter - Input data.
1354 :returns: Input data
1355 :rtype: pandas.Series
1357 return self._input_data
# Return the "metadata" Series for one job/build.
# NOTE(review): `build` is used as the key directly here, while suites()
# coerces with str(build) — callers must pass the build number already as
# a string, or this is an inconsistency worth confirming upstream.
1359 def metadata(self, job, build):
1360     """Getter - metadata
1362     :param job: Job which metadata we want.
1363     :param build: Build which metadata we want.
1367     :rtype: pandas.Series
1370     return self.data[job][build][u"metadata"]
# Return the "suites" Series for one job/build; build is coerced to str
# because builds are stored under string keys (see download_and_parse_data).
1372 def suites(self, job, build):
1375     :param job: Job which suites we want.
1376     :param build: Build which suites we want.
1380     :rtype: pandas.Series
1383     return self.data[job][str(build)][u"suites"]
# Return the "tests" Series for one job/build.
# NOTE(review): like metadata() (and unlike suites()), no str(build)
# coercion is applied — verify callers pass a string build number.
1385 def tests(self, job, build):
1388     :param job: Job which tests we want.
1389     :param build: Build which tests we want.
1393     :rtype: pandas.Series
1396     return self.data[job][build][u"tests"]
# Parse one downloaded output.xml with Robot Framework's ExecutionResult
# and walk it with ExecutionChecker to build the JSON-like data structure.
# Missing listing lines hide: the metadata construction, the `try:` that
# pairs with the visible `except errors.DataError`, appending the error to
# `log`, and the return of checker.data — confirm against the full source.
# On a parse failure the error is recorded in `log` (severity "ERROR")
# rather than raised, so one damaged file does not abort the whole run.
1398 def _parse_tests(self, job, build, log):
1399     """Process data from robot output.xml file and return JSON structured
1402     :param job: The name of job which build output data will be processed.
1403     :param build: The build which output data will be processed.
1404     :param log: List of log messages.
1407     :type log: list of tuples (severity, msg)
1408     :returns: JSON data structure.
1417     with open(build[u"file-name"], u'r') as data_file:
1419         result = ExecutionResult(data_file)
1420     except errors.DataError as err:
1422         (u"ERROR", f"Error occurred while parsing output.xml: "
1426     checker = ExecutionChecker(metadata, self._cfg.mapping,
1428     result.visit(checker)
# Download one build's output.xml (via download_and_unzip_data_file),
# parse it with _parse_tests(), then best-effort remove the local file.
# If the specification defines a "time-period", data older than that
# (judged by metadata["generated"], format "%Y%m%d %H:%M") is discarded so
# outdated builds can be removed.  All progress/problem messages are
# accumulated in `logs` and emitted at the end through the stdlib logging
# module, keyed by severity.  Returns a dict with the parsed data, the
# resulting state ("processed" or presumably "failed" in the missing
# branches) and the job/build identification for the caller.
# NOTE(review): several control-flow lines (the failure branches after an
# unsuccessful download/parse, `now = dt.utcnow()` or similar, the
# data-removal statements) fall in listing gaps — confirm against the
# full source.
1432 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1433     """Download and parse the input data file.
1435     :param pid: PID of the process executing this method.
1436     :param job: Name of the Jenkins job which generated the processed input
1438     :param build: Information about the Jenkins build which generated the
1439     processed input file.
1440     :param repeat: Repeat the download specified number of times if not
1451     (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
1459     success = download_and_unzip_data_file(self._cfg, job, build, pid,
1467     f"It is not possible to download the input data file from the "
1468     f"job {job}, build {build[u'build']}, or it is damaged. "
1474     f"  Processing data from the build {build[u'build']} ...")
1476     data = self._parse_tests(job, build, logs)
1480     f"Input data file from the job {job}, build "
1481     f"{build[u'build']} is damaged. Skipped.")
1484     state = u"processed"
1487     remove(build[u"file-name"])
1488     except OSError as err:
1490     ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1494     # If the time-period is defined in the specification file, remove all
1495     # files which are outside the time period.
1496     timeperiod = self._cfg.input.get(u"time-period", None)
1497     if timeperiod and data:
1499         timeperiod = timedelta(int(timeperiod))
1500         metadata = data.get(u"metadata", None)
1502         generated = metadata.get(u"generated", None)
1504         generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1505         if (now - generated) > timeperiod:
1506             # Remove the data and the file:
1511             f"  The build {job}/{build[u'build']} is "
1512             f"outdated, will be removed.")
1514     logs.append((u"INFO", u"  Done."))
1516     for level, line in logs:
1517         if level == u"INFO":
1519         elif level == u"ERROR":
1521         elif level == u"DEBUG":
1523         elif level == u"CRITICAL":
1524             logging.critical(line)
1525         elif level == u"WARNING":
1526             logging.warning(line)
1528     return {u"data": data, u"state": state, u"job": job, u"build": build}
# Driver: iterate every job/build in the specification, download+parse
# each (sequentially — despite the pid parameter downstream) and store the
# result in self._input_data as nested pandas.Series:
#   _input_data[job][str(build_nr)] -> Series with keys
#   "metadata", "suites", "tests".
# Also records the downloaded file name and the processing state back into
# the configuration, and logs the peak RSS (ru_maxrss, reported in kB on
# Linux, hence / 1000 to get MB — NOTE(review): on macOS ru_maxrss is in
# bytes, so the figure would be wrong there).
1530 def download_and_parse_data(self, repeat=1):
1531     """Download the input data files, parse input data from input files and
1532     store in pandas' Series.
1534     :param repeat: Repeat the download specified number of times if not
1539     logging.info(u"Downloading and parsing input files ...")
1541     for job, builds in self._cfg.builds.items():
1542         for build in builds:
1544             result = self._download_and_parse_build(job, build, repeat)
1545             build_nr = result[u"build"][u"build"]
1548             data = result[u"data"]
1549             build_data = pd.Series({
1550                 u"metadata": pd.Series(
1551                     list(data[u"metadata"].values()),
1552                     index=list(data[u"metadata"].keys())
1554                 u"suites": pd.Series(
1555                     list(data[u"suites"].values()),
1556                     index=list(data[u"suites"].keys())
1558                 u"tests": pd.Series(
1559                     list(data[u"tests"].values()),
1560                     index=list(data[u"tests"].keys())
1564             if self._input_data.get(job, None) is None:
1565                 self._input_data[job] = pd.Series()
1566             self._input_data[job][str(build_nr)] = build_data
1568             self._cfg.set_input_file_name(
1569                 job, build_nr, result[u"build"][u"file-name"])
1571             self._cfg.set_input_state(job, build_nr, result[u"state"])
1574     resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1575     logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1577     logging.info(u"Done.")
# Find the closing quote of a tag token in a filter string: locate the
# opening `closer` character at/after `start`, then return the index of
# the next occurrence (the actual closer).  str.index raises ValueError
# when no closer is found — presumably handled by the caller (the
# except lines fall in a listing gap).
# NOTE(review): a @staticmethod decorator presumably precedes this def
# (it takes no self/cls) — falls in a listing gap.
1580 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1581     """Return the index of character in the string which is the end of tag.
1583     :param tag_filter: The string where the end of tag is being searched.
1584     :param start: The index where the searching is started.
1585     :param closer: The character which is the tag closer.
1586     :type tag_filter: str
1589     :returns: The index of the tag closer.
1594     idx_opener = tag_filter.index(closer, start)
1595     return tag_filter.index(closer, idx_opener + 1)
# Turn a tag filter like "'TAG1' and 'TAG2'" into a Python boolean
# expression by appending " in tags" after each quoted tag, e.g.
# "'TAG1' in tags and 'TAG2' in tags".  The result is later passed to
# eval() in filter_data() with `tags` bound — the filter string comes
# from the element specification, so it is trusted input by design.
# The surrounding loop/return lines fall in listing gaps.
1600 def _condition(tag_filter):
1601     """Create a conditional statement from the given tag filter.
1603     :param tag_filter: Filter based on tags from the element specification.
1604     :type tag_filter: str
1605     :returns: Conditional statement which can be evaluated.
1611     index = InputData._end_of_tag(tag_filter, index)
1615     tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
# Filter tests/suites from self.data by a tag expression taken from
# element["filter"], returning a nested Series:
#   data[job][str(build)][test_id][param] -> value.
# The tag condition built by _condition() is applied per test via
# eval(cond, {"tags": ...}) — deliberate eval of a spec-provided filter,
# not untrusted input.  When `params` is given, only those parameters are
# copied (missing ones presumably set to a "No Data" placeholder in the
# gap after line 1700); otherwise all parameters are copied.
# NOTE(review): numerous lines are missing from this listing (the
# "suites" branch body, the `try:` opener, the else-branches, returns and
# error returns) — confirm the control flow against the full source.
1617 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1618                 continue_on_error=False):
1619     """Filter required data from the given jobs and builds.
1621     The output data structure is:
1625     - test (or suite) 1 ID:
1631     - test (or suite) n ID:
1638     :param element: Element which will use the filtered data.
1639     :param params: Parameters which will be included in the output. If None,
1640     all parameters are included.
1641     :param data: If not None, this data is used instead of data specified
1643     :param data_set: The set of data to be filtered: tests, suites,
1645     :param continue_on_error: Continue if there is error while reading the
1646     data. The Item will be empty then
1647     :type element: pandas.Series
1651     :type continue_on_error: bool
1652     :returns: Filtered data.
1653     :rtype pandas.Series
1657     if data_set == "suites":
1659     elif element[u"filter"] in (u"all", u"template"):
1662         cond = InputData._condition(element[u"filter"])
1663     logging.debug(f"  Filter: {cond}")
1665     logging.error(u"  No filter defined.")
1669     params = element.get(u"parameters", None)
1671     params.append(u"type")
1673     data_to_filter = data if data else element[u"data"]
1676     for job, builds in data_to_filter.items():
1677         data[job] = pd.Series()
1678         for build in builds:
1679             data[job][str(build)] = pd.Series()
1682             self.data[job][str(build)][data_set].items())
1684             if continue_on_error:
1688             for test_id, test_data in data_dict.items():
1689                 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1690                     data[job][str(build)][test_id] = pd.Series()
1692                     for param, val in test_data.items():
1693                         data[job][str(build)][test_id][param] = val
1695                     for param in params:
1697                         data[job][str(build)][test_id][param] =\
1700                         data[job][str(build)][test_id][param] =\
1704     except (KeyError, IndexError, ValueError) as err:
1706         f"Missing mandatory parameter in the element specification: "
1710     except AttributeError as err:
1711         logging.error(repr(err))
1713     except SyntaxError as err:
1715         f"The filter {cond} is not correct. Check if all tags are "
1716         f"enclosed by apostrophes.\n{repr(err)}"
# Like filter_data(), but selects tests by NAME: each entry in
# element["include"] is compiled as a lowercase regex and matched
# (re.match, i.e. anchored at the start) against each lowercased test ID.
# Matching tests are copied into the nested output Series
# data[job][str(build)][test_id][param]; with `params` given, absent
# parameters get the "No Data" placeholder.  KeyErrors per test are
# logged and, with continue_on_error, skipped (the return-on-error lines
# fall in listing gaps).
# NOTE(review): the `try:` openers, else-branches and return statements
# are missing from this listing — confirm against the full source.
1720 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1721                          continue_on_error=False):
1722     """Filter required data from the given jobs and builds.
1724     The output data structure is:
1728     - test (or suite) 1 ID:
1734     - test (or suite) n ID:
1741     :param element: Element which will use the filtered data.
1742     :param params: Parameters which will be included in the output. If None,
1743     all parameters are included.
1744     :param data_set: The set of data to be filtered: tests, suites,
1746     :param continue_on_error: Continue if there is error while reading the
1747     data. The Item will be empty then
1748     :type element: pandas.Series
1751     :type continue_on_error: bool
1752     :returns: Filtered data.
1753     :rtype pandas.Series
1756     include = element.get(u"include", None)
1758     logging.warning(u"No tests to include, skipping the element.")
1762     params = element.get(u"parameters", None)
1764     params.append(u"type")
1768     for job, builds in element[u"data"].items():
1769         data[job] = pd.Series()
1770         for build in builds:
1771             data[job][str(build)] = pd.Series()
1772             for test in include:
1774                 reg_ex = re.compile(str(test).lower())
1775                 for test_id in self.data[job][
1776                         str(build)][data_set].keys():
1777                     if re.match(reg_ex, str(test_id).lower()):
1778                         test_data = self.data[job][
1779                             str(build)][data_set][test_id]
1780                         data[job][str(build)][test_id] = pd.Series()
1782                         for param, val in test_data.items():
1783                             data[job][str(build)][test_id]\
1786                         for param in params:
1788                             data[job][str(build)][
1792                             data[job][str(build)][
1793                                 test_id][param] = u"No Data"
1794             except KeyError as err:
1795                 logging.error(repr(err))
1796                 if continue_on_error:
1801     except (KeyError, IndexError, ValueError) as err:
1803         f"Missing mandatory parameter in the element "
1804         f"specification: {repr(err)}"
1807     except AttributeError as err:
1808         logging.error(repr(err))
# Flatten filtered data (job -> build -> item_id -> item) into a single
# Series keyed by item_id.  Later jobs/builds silently overwrite earlier
# entries with the same item_id.  Iterates `.values` (the Series values
# attribute), i.e. all jobs, then all builds per job.
# NOTE(review): a @staticmethod decorator presumably precedes this def
# (no self parameter) — falls in a listing gap, as does the return.
1812 def merge_data(data):
1813     """Merge data from more jobs and builds to a simple data structure.
1815     The output data structure is:
1817     - test (suite) 1 ID:
1823     - test (suite) n ID:
1826     :param data: Data to merge.
1827     :type data: pandas.Series
1828     :returns: Merged data.
1829     :rtype: pandas.Series
1832     logging.info(u"  Merging data ...")
1834     merged_data = pd.Series()
1835     for builds in data.values:
1836         for item in builds.values:
1837             for item_id, item_data in item.items():
1838                 merged_data[item_id] = item_data
# Dump "show-run" operational data of every parsed test to stdout as
# PrettyTable text: one table per DUT thread, preceded by host/socket info
# and a per-thread average vector size (thread 0 is the "main" thread,
# others are "worker_<nr>").  Tests or DUTs without show-run/threads data
# are skipped (the `continue` lines fall in listing gaps, as do the table
# header definition, the row loop and the avg accumulation).
1842 def print_all_oper_data(self):
1843     """Print all operational data to console.
1851     u"Cycles per Packet",
1852     u"Average Vector Size"
1855     for job in self._input_data.values:
1856         for build in job.values:
1857             for test_id, test_data in build[u"tests"].items():
1859                 if test_data.get(u"show-run", None) is None:
1861                 for dut_name, data in test_data[u"show-run"].items():
1862                     if data.get(u"threads", None) is None:
1864                     print(f"Host IP: {data.get(u'host', '')}, "
1865                           f"Socket: {data.get(u'socket', '')}")
1866                     for thread_nr, thread in data[u"threads"].items():
1867                         txt_table = prettytable.PrettyTable(tbl_hdr)
1870                         txt_table.add_row(row)
1872                         if len(thread) == 0:
1875                         avg = f", Average Vector Size per Node: " \
1876                               f"{(avg / len(thread)):.2f}"
1877                         th_name = u"main" if thread_nr == 0 \
1878                             else f"worker_{thread_nr}"
1879                         print(f"{dut_name}, {th_name}{avg}")
1880                         txt_table.float_format = u".2"
1881                         txt_table.align = u"r"
1882                         txt_table.align[u"Name"] = u"l"
1883                         print(f"{txt_table.get_string()}\n")