1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Data pre-processing
16 - extract data from output.xml files generated by Jenkins jobs and store in
18 - provide access to the data.
19 - filter the data using tags,
27 from collections import OrderedDict
29 from datetime import datetime as dt
30 from datetime import timedelta
31 from json import loads
36 from robot.api import ExecutionResult, ResultVisitor
37 from robot import errors
39 from resources.libraries.python import jumpavg
40 from input_data_files import download_and_unzip_data_file
43 # Separator used in file names
# NOTE(review): this listing is a non-contiguous excerpt. The leading integers
# are original-file line numbers; gaps between them mean lines were elided
# (docstring closers, braces, whole statements). Tokens below are unchanged.
47 class ExecutionChecker(ResultVisitor):
48 """Class to traverse through the test suite structure.
50 The functionality implemented in this class generates a json structure:
56 "generated": "Timestamp",
57 "version": "SUT version",
58 "job": "Jenkins job name",
59 "build": "Information about the build"
62 "Suite long name 1": {
64 "doc": "Suite 1 documentation",
65 "parent": "Suite 1 parent",
66 "level": "Level of the suite in the suite hierarchy"
68 "Suite long name N": {
70 "doc": "Suite N documentation",
71 "parent": "Suite 2 parent",
72 "level": "Level of the suite in the suite hierarchy"
79 "parent": "Name of the parent of the test",
80 "doc": "Test documentation",
81 "msg": "Test message",
82 "conf-history": "DUT1 and DUT2 VAT History",
83 "show-run": "Show Run",
84 "tags": ["tag 1", "tag 2", "tag n"],
86 "status": "PASS" | "FAIL",
132 "parent": "Name of the parent of the test",
133 "doc": "Test documentation",
134 "msg": "Test message",
135 "tags": ["tag 1", "tag 2", "tag n"],
137 "status": "PASS" | "FAIL",
144 "parent": "Name of the parent of the test",
145 "doc": "Test documentation",
146 "msg": "Test message",
147 "tags": ["tag 1", "tag 2", "tag n"],
148 "type": "MRR" | "BMRR",
149 "status": "PASS" | "FAIL",
151 "receive-rate": float,
152 # Average of a list, computed using AvgStdevStats.
153 # In CSIT-1180, replace with List[float].
167 "metadata": { # Optional
168 "version": "VPP version",
169 "job": "Jenkins job name",
170 "build": "Information about the build"
174 "doc": "Suite 1 documentation",
175 "parent": "Suite 1 parent",
176 "level": "Level of the suite in the suite hierarchy"
179 "doc": "Suite N documentation",
180 "parent": "Suite 2 parent",
181 "level": "Level of the suite in the suite hierarchy"
187 "parent": "Name of the parent of the test",
188 "doc": "Test documentation"
189 "msg": "Test message"
190 "tags": ["tag 1", "tag 2", "tag n"],
191 "conf-history": "DUT1 and DUT2 VAT History"
192 "show-run": "Show Run"
193 "status": "PASS" | "FAIL"
201 .. note:: ID is the lowercase full path to the test.
# Class-level compiled regexes used to pull measured values (rates, latency,
# versions, tags) out of Robot Framework test/keyword message strings.
# NOTE(review): many use '\d+.\d+' where the '.' is an unescaped any-char —
# presumably intended as a literal dot; confirm against upstream before
# tightening, since messages may rely on the loose match.
204 REGEX_PLR_RATE = re.compile(r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
205 r'PLRsearch upper bound::?\s(\d+.\d+)')
207 REGEX_NDRPDR_RATE = re.compile(r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
208 r'NDR_UPPER:\s(\d+.\d+).*\n'
209 r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
210 r'PDR_UPPER:\s(\d+.\d+)')
212 REGEX_NDRPDR_LAT = re.compile(r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
213 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]')
# Long (18-group) latency variant: two LATENCY lines plus seven
# "Latency" lines, two directions each.
215 REGEX_NDRPDR_LAT_LONG = re.compile(
216 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
217 r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
218 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
219 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
220 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
221 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
222 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
223 r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
224 r'Latency.*\[\'(.*)\', \'(.*)\'\]'
227 REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
230 REGEX_VERSION_VPP = re.compile(r"(return STDOUT Version:\s*|"
231 r"VPP Version:\s*|VPP version:\s*)(.*)")
233 REGEX_VERSION_DPDK = re.compile(r"(DPDK version:\s*|DPDK Version:\s*)(.*)")
235 REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s(\d*).*$')
237 REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
238 r'tx\s(\d*),\srx\s(\d*)')
240 REGEX_BMRR = re.compile(r'Maximum Receive Rate trial results'
241 r' in packets per second: \[(.*)\]')
243 REGEX_RECONF_LOSS = re.compile(r'Packets lost due to reconfig: (\d*)')
244 REGEX_RECONF_TIME = re.compile(r'Implied time lost: (\d*.[\de-]*)')
# Test-case naming helpers: thread/core tag (e.g. "2t1c"), old/new name
# forms, and the "tcNN-" prefix stripped for backward compatibility.
246 REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
248 REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
250 REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
252 REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
254 REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
# Initialize parser state for one output.xml traversal.
# NOTE(review): excerpt is elided — e.g. the `self._data = {` and
# `self.parse_msg = {` opening lines are missing between the visible lines.
256 def __init__(self, metadata, mapping, ignore):
259 :param metadata: Key-value pairs to be included in "metadata" part of
261 :param mapping: Mapping of the old names of test cases to the new
263 :param ignore: List of TCs to be ignored.
269 # Type of message to parse out from the test messages
270 self._msg_type = None
276 self._timestamp = None
278 # Testbed. The testbed is identified by TG node IP address.
281 # Mapping of TCs long names
282 self._mapping = mapping
285 self._ignore = ignore
287 # Number of PAPI History messages found:
289 # 1 - PAPI History of DUT1
290 # 2 - PAPI History of DUT2
291 self._lookup_kw_nr = 0
292 self._conf_history_lookup_nr = 0
294 # Number of Show Running messages found
296 # 1 - Show run message found
297 self._show_run_lookup_nr = 0
299 # Test ID of currently processed test- the lowercase full path to the
303 # The main data structure
305 u"metadata": OrderedDict(),
306 u"suites": OrderedDict(),
307 u"tests": OrderedDict()
310 # Save the provided metadata
311 for key, val in metadata.items():
312 self._data[u"metadata"][key] = val
314 # Dictionary defining the methods used to parse different types of
# Dispatch table: message-type string -> bound parser method; driven by
# self._msg_type set in the start_*_kw hooks and consumed in start_message.
317 u"timestamp": self._get_timestamp,
318 u"vpp-version": self._get_vpp_version,
319 u"dpdk-version": self._get_dpdk_version,
320 # TODO: Remove when not needed:
321 u"teardown-vat-history": self._get_vat_history,
322 u"teardown-papi-history": self._get_papi_history,
323 u"test-show-runtime": self._get_show_run,
324 u"testbed": self._get_testbed
# `data` property docstring fragment (the `@property` / `def data` lines are
# elided from this excerpt).
329 """Getter - Data parsed from the XML file.
331 :returns: Data parsed from the XML file.
# Extract the testbed identifier (TG node IPv4 address) from a TG-setup
# message and store it under metadata["testbed"]; resets _msg_type so the
# message is parsed only once. The `try:` line before 350 is elided.
336 def _get_testbed(self, msg):
337 """Called when extraction of testbed IP is required.
338 The testbed is identified by TG node IP address.
340 :param msg: Message to process.
345 if msg.message.count(u"Setup of TG node") or \
346 msg.message.count(u"Setup of node TG host"):
347 reg_tg_ip = re.compile(
348 r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
350 self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
351 except (KeyError, ValueError, IndexError, AttributeError):
354 self._data[u"metadata"][u"testbed"] = self._testbed
355 self._msg_type = None
# Version/timestamp extractors. Each writes into self._data["metadata"] and
# clears self._msg_type so only the first matching message is consumed.
357 def _get_vpp_version(self, msg):
358 """Called when extraction of VPP version is required.
360 :param msg: Message to process.
365 if msg.message.count(u"return STDOUT Version:") or \
366 msg.message.count(u"VPP Version:") or \
367 msg.message.count(u"VPP version:"):
# REGEX_VERSION_VPP group 2 holds the version string (continuation elided).
368 self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
370 self._data[u"metadata"][u"version"] = self._version
371 self._msg_type = None
373 def _get_dpdk_version(self, msg):
374 """Called when extraction of DPDK version is required.
376 :param msg: Message to process.
381 if msg.message.count(u"DPDK Version:"):
383 self._version = str(re.search(
384 self.REGEX_VERSION_DPDK, msg.message).group(2))
385 self._data[u"metadata"][u"version"] = self._version
389 self._msg_type = None
391 def _get_timestamp(self, msg):
392 """Called when extraction of timestamp is required.
394 :param msg: Message to process.
# Robot message timestamps look like "YYYYMMDD HH:MM:SS.mmm"; the first 14
# characters keep "YYYYMMDD HH:MM" — matches the strptime format used later.
399 self._timestamp = msg.timestamp[:14]
400 self._data[u"metadata"][u"generated"] = self._timestamp
401 self._msg_type = None
# VAT/PAPI history extractors: strip the "<ip> ... command history:" prefix,
# convert newlines to the " |br| " markup, and append per-DUT sections to the
# current test's "conf-history" field. _conf_history_lookup_nr counts DUTs.
403 def _get_vat_history(self, msg):
404 """Called when extraction of VAT command history is required.
406 TODO: Remove when not needed.
408 :param msg: Message to process.
412 if msg.message.count(u"VAT command history:"):
413 self._conf_history_lookup_nr += 1
414 if self._conf_history_lookup_nr == 1:
415 self._data[u"tests"][self._test_id][u"conf-history"] = str()
417 self._msg_type = None
418 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
419 r"VAT command history:", u"",
420 msg.message, count=1).replace(u'\n', u' |br| ').\
423 self._data[u"tests"][self._test_id][u"conf-history"] += (
424 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
# Same flow as _get_vat_history but for PAPI command history messages.
427 def _get_papi_history(self, msg):
428 """Called when extraction of PAPI command history is required.
430 :param msg: Message to process.
434 if msg.message.count(u"PAPI command history:"):
435 self._conf_history_lookup_nr += 1
436 if self._conf_history_lookup_nr == 1:
437 self._data[u"tests"][self._test_id][u"conf-history"] = str()
439 self._msg_type = None
440 text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
441 r"PAPI command history:", u"",
442 msg.message, count=1).replace(u'\n', u' |br| ').\
444 self._data[u"tests"][self._test_id][u"conf-history"] += (
445 f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
# Parse a "show runtime" JSON payload from the message, build one
# prettytable per worker thread, and append the rendered tables to the
# test's "show-run" field. Heavily elided: the json `loads(...)` call and
# several try/except and row-append lines are missing from this excerpt.
448 def _get_show_run(self, msg):
449 """Called when extraction of VPP operational data (output of CLI command
450 Show Runtime) is required.
452 :param msg: Message to process.
457 if u"show-run" not in self._data[u"tests"][self._test_id].keys():
458 self._data[u"tests"][self._test_id][u"show-run"] = str()
460 if msg.message.count(u"stats runtime") or \
461 msg.message.count(u"Runtime"):
# Host/socket come from REGEX_TC_PAPI_CLI groups (1=IP, 2=socket path).
463 host = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
465 except (AttributeError, IndexError):
466 host = self._data[u"tests"][self._test_id][u"show-run"].\
469 socket = str(re.search(self.REGEX_TC_PAPI_CLI, msg.message).
471 socket = f"/{socket}"
472 except (AttributeError, IndexError):
# Message text is normalized before json parsing (b"/u" prefixes dropped).
479 replace(u'b"', u'"').
480 replace(u'u"', u'"').
484 threads_nr = len(runtime[0][u"clocks"])
485 except (IndexError, KeyError):
495 table = [[tbl_hdr, ] for _ in range(threads_nr)]
497 for idx in range(threads_nr):
498 name = format(item[u"name"])
499 calls = format(item[u"calls"][idx])
500 vectors = format(item[u"vectors"][idx])
501 suspends = format(item[u"suspends"][idx])
# Clocks-per-unit: prefer vectors, then calls, then suspends as divisor.
502 if item[u"vectors"][idx] > 0:
504 item[u"clocks"][idx]/item[u"vectors"][idx], u".2e")
505 elif item[u"calls"][idx] > 0:
507 item[u"clocks"][idx]/item[u"calls"][idx], u".2e")
508 elif item[u"suspends"][idx] > 0:
510 item[u"clocks"][idx]/item[u"suspends"][idx], u".2e")
513 if item[u"calls"][idx] > 0:
514 vectors_call = format(
515 item[u"vectors"][idx]/item[u"calls"][idx], u".2f")
517 vectors_call = format(0, u".2f")
# Only rows with some activity (calls+vectors+suspends != 0) are kept.
518 if int(calls) + int(vectors) + int(suspends):
520 name, calls, vectors, suspends, clocks, vectors_call
# Render one table per thread; thread 0 is vpp_main, others vpp_wk_<n-1>.
523 for idx in range(threads_nr):
524 text += f"Thread {idx} "
525 text += u"vpp_main\n" if idx == 0 else f"vpp_wk_{idx-1}\n"
527 for row in table[idx]:
528 if txt_table is None:
529 txt_table = prettytable.PrettyTable(row)
532 txt_table.add_row(row)
533 txt_table.set_style(prettytable.MSWORD_FRIENDLY)
534 txt_table.align[u"Name"] = u"l"
535 txt_table.align[u"Calls"] = u"r"
536 txt_table.align[u"Vectors"] = u"r"
537 txt_table.align[u"Suspends"] = u"r"
538 txt_table.align[u"Clocks"] = u"r"
539 txt_table.align[u"Vectors/Calls"] = u"r"
541 text += txt_table.get_string(sortby=u"Name") + u'\n'
542 text = f"\n**DUT: {host}{socket}**\n{text}".\
543 replace(u'\n', u' |br| ').\
544 replace(u'\r', u'').\
546 self._data[u"tests"][self._test_id][u"show-run"] += text
# Throughput extractors. Both return (dict, status) where values default to
# -1.0 and status defaults to FAIL when the regex does not match (the
# `status = u"FAIL"` / `try:` / `status = u"PASS"` lines are elided here).
548 def _get_ndrpdr_throughput(self, msg):
549 """Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
552 :param msg: The test message to be parsed.
554 :returns: Parsed data as a dict and the status (PASS/FAIL).
555 :rtype: tuple(dict, str)
559 u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
560 u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
563 groups = re.search(self.REGEX_NDRPDR_RATE, msg)
565 if groups is not None:
567 throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
568 throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
569 throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
570 throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
572 except (IndexError, ValueError):
575 return throughput, status
# PLRsearch (soak test) bounds: groups 1/2 are lower/upper rate.
577 def _get_plr_throughput(self, msg):
578 """Get PLRsearch lower bound and PLRsearch upper bound from the test
581 :param msg: The test message to be parsed.
583 :returns: Parsed data as a dict and the status (PASS/FAIL).
584 :rtype: tuple(dict, str)
592 groups = re.search(self.REGEX_PLR_RATE, msg)
594 if groups is not None:
596 throughput[u"LOWER"] = float(groups.group(1))
597 throughput[u"UPPER"] = float(groups.group(2))
599 except (IndexError, ValueError):
602 return throughput, status
# Latency extractor: tries the short 4-group LATENCY regex first, then the
# long 18-group variant; fills a nested dict of per-direction min/avg/max
# (plus optional hdrh) records. Elided lines include the latency_default
# definition and the dict keys ("NDR"/"PDR"/"LAT0"/"NDR10"/.../"PDR90").
604 def _get_ndrpdr_latency(self, msg):
605 """Get LATENCY from the test message.
607 :param msg: The test message to be parsed.
609 :returns: Parsed data as a dict and the status (PASS/FAIL).
610 :rtype: tuple(dict, str)
620 u"direction1": copy.copy(latency_default),
621 u"direction2": copy.copy(latency_default)
624 u"direction1": copy.copy(latency_default),
625 u"direction2": copy.copy(latency_default)
628 u"direction1": copy.copy(latency_default),
629 u"direction2": copy.copy(latency_default)
632 u"direction1": copy.copy(latency_default),
633 u"direction2": copy.copy(latency_default)
636 u"direction1": copy.copy(latency_default),
637 u"direction2": copy.copy(latency_default)
640 u"direction1": copy.copy(latency_default),
641 u"direction2": copy.copy(latency_default)
644 u"direction1": copy.copy(latency_default),
645 u"direction2": copy.copy(latency_default)
648 u"direction1": copy.copy(latency_default),
649 u"direction2": copy.copy(latency_default)
652 u"direction1": copy.copy(latency_default),
653 u"direction2": copy.copy(latency_default)
# Short regex first; fall back to the long one when it does not match.
657 groups = re.search(self.REGEX_NDRPDR_LAT, msg)
659 groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
662 return latency, u"FAIL"
664 def process_latency(in_str):
665 """Return object with parsed latency values.
667 TODO: Define class for the return type.
669 :param in_str: Input string, min/avg/max/hdrh format.
671 :returns: Dict with corresponding keys, except hdrh float values.
673 :throws IndexError: If in_str does not have enough substrings.
674 :throws ValueError: If a substring does not convert to float.
676 in_list = in_str.split('/', 3)
679 u"min": float(in_list[0]),
680 u"avg": float(in_list[1]),
681 u"max": float(in_list[2]),
# Optional 4th component is the hdrh-encoded histogram, kept as a string.
685 if len(in_list) == 4:
686 rval[u"hdrh"] = str(in_list[3])
# Groups 1-4: NDR/PDR latency, both directions; a 4-group match passes.
691 latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
692 latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
693 latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
694 latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
695 if groups.lastindex == 4:
696 return latency, u"PASS"
697 except (IndexError, ValueError):
# Groups 5-18: extended latency at 0/10/50/90 percent of NDR and PDR.
701 latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
702 latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
703 latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
704 latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
705 latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
706 latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
707 latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
708 latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
709 latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
710 latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
711 latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
712 latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
713 latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
714 latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
715 return latency, u"PASS"
716 except (IndexError, ValueError):
719 return latency, u"FAIL"
# Suite traversal hooks (Robot ResultVisitor protocol): visit child suites,
# then tests, then end_suite.
721 def visit_suite(self, suite):
722 """Implements traversing through the suite and its direct children.
724 :param suite: Suite to process.
728 if self.start_suite(suite) is not False:
729 suite.suites.visit(self)
730 suite.tests.visit(self)
731 self.end_suite(suite)
# Record suite metadata under its dotted long name (lowercased, spaces
# replaced by underscores). Parent-name lookup is try/except-guarded because
# the root suite has no parent (AttributeError path, `try:` line elided).
733 def start_suite(self, suite):
734 """Called when suite starts.
736 :param suite: Suite to process.
742 parent_name = suite.parent.name
743 except AttributeError:
# Doc text is normalized into the " |br| " markup used by report generation.
746 doc_str = suite.doc.\
747 replace(u'"', u"'").\
748 replace(u'\n', u' ').\
749 replace(u'\r', u'').\
750 replace(u'*[', u' |br| *[').\
751 replace(u"*", u"**").\
752 replace(u' |br| *[', u'*[', 1)
754 self._data[u"suites"][suite.longname.lower().
756 replace(u" ", u"_")] = {
757 u"name": suite.name.lower(),
759 u"parent": parent_name,
760 u"level": len(suite.longname.split(u"."))
# Visit suite-level keywords (setup/teardown) too.
763 suite.keywords.visit(self)
765 def end_suite(self, suite):
766 """Called when suite ends.
768 :param suite: Suite to process.
# Test traversal hook: start_test gate, then visit the test's keywords
# (the trailing end_test call is elided from this excerpt).
773 def visit_test(self, test):
774 """Implements traversing through the test.
776 :param test: Test to process.
780 if self.start_test(test) is not False:
781 test.keywords.visit(self)
# Build the per-test result record: identity (mapped/normalized long name),
# doc/msg/tags, then type-specific parsing (NDRPDR, SOAK, TCP, MRR/BMRR,
# RECONF) dispatched on tags. The `test_result = dict()` initialization and
# several branch openers are elided from this excerpt.
784 def start_test(self, test):
785 """Called when test starts.
787 :param test: Test to process.
792 longname_orig = test.longname.lower()
794 # Check the ignore list
795 if longname_orig in self._ignore:
798 tags = [str(tag) for tag in test.tags]
801 # Change the TC long name and name if defined in the mapping table
802 longname = self._mapping.get(longname_orig, None)
803 if longname is not None:
804 name = longname.split(u'.')[-1]
806 f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
810 longname = longname_orig
811 name = test.name.lower()
813 # Remove TC number from the TC long name (backward compatibility):
814 self._test_id = re.sub(self.REGEX_TC_NUMBER, u"", longname)
815 # Remove TC number from the TC name (not needed):
816 test_result[u"name"] = re.sub(self.REGEX_TC_NUMBER, "", name)
818 test_result[u"parent"] = test.parent.name.lower()
819 test_result[u"tags"] = tags
820 test_result["doc"] = test.doc.\
821 replace(u'"', u"'").\
822 replace(u'\n', u' ').\
823 replace(u'\r', u'').\
824 replace(u'[', u' |br| [').\
825 replace(u' |br| [', u'[', 1)
826 test_result[u"msg"] = test.message.\
827 replace(u'\n', u' |br| ').\
828 replace(u'\r', u'').\
# Defaults before tag-specific overrides below.
830 test_result[u"type"] = u"FUNC"
831 test_result[u"status"] = test.status
833 if u"PERFTEST" in tags:
834 # Replace info about cores (e.g. -1c-) with the info about threads
835 # and cores (e.g. -1t1c-) in the long test case names and in the
836 # test case names if necessary.
837 groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
841 for tag in test_result[u"tags"]:
842 groups = re.search(self.REGEX_TC_TAG, tag)
848 self._test_id = re.sub(self.REGEX_TC_NAME_NEW,
849 f"-{tag_tc.lower()}-",
852 test_result[u"name"] = re.sub(self.REGEX_TC_NAME_NEW,
853 f"-{tag_tc.lower()}-",
# No unique thread/core tag found: mark FAIL with an explanatory message.
857 test_result[u"status"] = u"FAIL"
858 self._data[u"tests"][self._test_id] = test_result
860 f"The test {self._test_id} has no or more than one "
861 f"multi-threading tags.\n"
862 f"Tags: {test_result[u'tags']}"
# Type-specific result extraction only for tests that passed in Robot.
866 if test.status == u"PASS":
867 if u"NDRPDR" in tags:
868 test_result[u"type"] = u"NDRPDR"
869 test_result[u"throughput"], test_result[u"status"] = \
870 self._get_ndrpdr_throughput(test.message)
871 test_result[u"latency"], test_result[u"status"] = \
872 self._get_ndrpdr_latency(test.message)
873 elif u"SOAK" in tags:
874 test_result[u"type"] = u"SOAK"
875 test_result[u"throughput"], test_result[u"status"] = \
876 self._get_plr_throughput(test.message)
878 test_result[u"type"] = u"TCP"
879 groups = re.search(self.REGEX_TCP, test.message)
880 test_result[u"result"] = int(groups.group(2))
881 elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
883 test_result[u"type"] = u"MRR"
885 test_result[u"type"] = u"BMRR"
887 test_result[u"result"] = dict()
888 groups = re.search(self.REGEX_BMRR, test.message)
889 if groups is not None:
890 items_str = groups.group(1)
891 items_float = [float(item.strip()) for item
892 in items_str.split(",")]
893 # Use whole list in CSIT-1180.
894 stats = jumpavg.AvgStdevStats.for_runs(items_float)
895 test_result[u"result"][u"receive-rate"] = stats.avg
# Legacy MRR message: rx pkts / interval seconds -> rate.
897 groups = re.search(self.REGEX_MRR, test.message)
898 test_result[u"result"][u"receive-rate"] = \
899 float(groups.group(3)) / float(groups.group(1))
900 elif u"RECONF" in tags:
901 test_result[u"type"] = u"RECONF"
902 test_result[u"result"] = None
904 grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
905 grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
906 test_result[u"result"] = {
907 u"loss": int(grps_loss.group(1)),
908 u"time": float(grps_time.group(1))
910 except (AttributeError, IndexError, ValueError, TypeError):
911 test_result[u"status"] = u"FAIL"
913 test_result[u"status"] = u"FAIL"
914 self._data[u"tests"][self._test_id] = test_result
917 self._data[u"tests"][self._test_id] = test_result
919 def end_test(self, test):
920 """Called when test ends.
922 :param test: Test to process.
# Keyword traversal: visit/end pair, with start_keyword routing setup,
# teardown and test keywords to their dedicated visitors.
927 def visit_keyword(self, keyword):
928 """Implements traversing through the keyword and its child keywords.
930 :param keyword: Keyword to process.
931 :type keyword: Keyword
934 if self.start_keyword(keyword) is not False:
935 self.end_keyword(keyword)
# Dispatch on keyword.type; AttributeError-guarded because some result
# objects lack the attribute (the `try:` opener is elided from this excerpt).
937 def start_keyword(self, keyword):
938 """Called when keyword starts. Default implementation does nothing.
940 :param keyword: Keyword to process.
941 :type keyword: Keyword
945 if keyword.type == u"setup":
946 self.visit_setup_kw(keyword)
947 elif keyword.type == u"teardown":
948 self._lookup_kw_nr = 0
949 self.visit_teardown_kw(keyword)
951 self._lookup_kw_nr = 0
952 self.visit_test_kw(keyword)
953 except AttributeError:
956 def end_keyword(self, keyword):
957 """Called when keyword ends. Default implementation does nothing.
959 :param keyword: Keyword to process.
960 :type keyword: Keyword
# Recursive walk of a test keyword's children; start_test_kw decides which
# messages to parse (show-runtime, DPDK version) by setting self._msg_type.
964 def visit_test_kw(self, test_kw):
965 """Implements traversing through the test keyword and its child
968 :param test_kw: Keyword to process.
969 :type test_kw: Keyword
972 for keyword in test_kw.keywords:
973 if self.start_test_kw(keyword) is not False:
974 self.visit_test_kw(keyword)
975 self.end_test_kw(keyword)
977 def start_test_kw(self, test_kw):
978 """Called when test keyword starts. Default implementation does
981 :param test_kw: Keyword to process.
982 :type test_kw: Keyword
985 if test_kw.name.count(u"Show Runtime On All Duts") or \
986 test_kw.name.count(u"Show Runtime Counters On All Duts"):
987 self._lookup_kw_nr += 1
988 self._show_run_lookup_nr = 0
989 self._msg_type = u"test-show-runtime"
# DPDK version only looked up once (guarded by `not self._version`).
990 elif test_kw.name.count(u"Install Dpdk Test") and not self._version:
991 self._msg_type = u"dpdk-version"
994 test_kw.messages.visit(self)
996 def end_test_kw(self, test_kw):
997 """Called when keyword ends. Default implementation does nothing.
999 :param test_kw: Keyword to process.
1000 :type test_kw: Keyword
# Setup-keyword walk; start_setup_kw arms the message parser for VPP
# version, timestamp, and testbed extraction (each only until first found).
1004 def visit_setup_kw(self, setup_kw):
1005 """Implements traversing through the teardown keyword and its child
1008 :param setup_kw: Keyword to process.
1009 :type setup_kw: Keyword
1012 for keyword in setup_kw.keywords:
1013 if self.start_setup_kw(keyword) is not False:
1014 self.visit_setup_kw(keyword)
1015 self.end_setup_kw(keyword)
1017 def start_setup_kw(self, setup_kw):
1018 """Called when teardown keyword starts. Default implementation does
1021 :param setup_kw: Keyword to process.
1022 :type setup_kw: Keyword
1025 if setup_kw.name.count(u"Show Vpp Version On All Duts") \
1026 and not self._version:
1027 self._msg_type = u"vpp-version"
1028 elif setup_kw.name.count(u"Set Global Variable") \
1029 and not self._timestamp:
1030 self._msg_type = u"timestamp"
1031 elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
1032 self._msg_type = u"testbed"
1035 setup_kw.messages.visit(self)
1037 def end_setup_kw(self, setup_kw):
1038 """Called when keyword ends. Default implementation does nothing.
1040 :param setup_kw: Keyword to process.
1041 :type setup_kw: Keyword
# Teardown-keyword walk; start_teardown_kw arms VAT/PAPI history parsing
# and resets the per-test DUT counter before visiting messages.
1045 def visit_teardown_kw(self, teardown_kw):
1046 """Implements traversing through the teardown keyword and its child
1049 :param teardown_kw: Keyword to process.
1050 :type teardown_kw: Keyword
1053 for keyword in teardown_kw.keywords:
1054 if self.start_teardown_kw(keyword) is not False:
1055 self.visit_teardown_kw(keyword)
1056 self.end_teardown_kw(keyword)
1058 def start_teardown_kw(self, teardown_kw):
1059 """Called when teardown keyword starts
1061 :param teardown_kw: Keyword to process.
1062 :type teardown_kw: Keyword
1066 if teardown_kw.name.count(u"Show Vat History On All Duts"):
1067 # TODO: Remove when not needed:
1068 self._conf_history_lookup_nr = 0
1069 self._msg_type = u"teardown-vat-history"
1070 teardown_kw.messages.visit(self)
1071 elif teardown_kw.name.count(u"Show Papi History On All Duts"):
1072 self._conf_history_lookup_nr = 0
1073 self._msg_type = u"teardown-papi-history"
1074 teardown_kw.messages.visit(self)
1076 def end_teardown_kw(self, teardown_kw):
1077 """Called when keyword ends. Default implementation does nothing.
1079 :param teardown_kw: Keyword to process.
1080 :type teardown_kw: Keyword
# Message hooks: start_message routes the message to the parser registered
# for the current self._msg_type via the parse_msg dispatch table.
1084 def visit_message(self, msg):
1085 """Implements visiting the message.
1087 :param msg: Message to process.
1091 if self.start_message(msg) is not False:
1092 self.end_message(msg)
1094 def start_message(self, msg):
1095 """Called when message starts. Get required information from messages:
1098 :param msg: Message to process.
# Guard (likely `if self._msg_type:`) is elided in this excerpt.
1104 self.parse_msg[self._msg_type](msg)
1106 def end_message(self, msg):
1107 """Called when message ends. Default implementation does nothing.
1109 :param msg: Message to process.
# InputData class-docstring fragment plus constructor and getters. The
# `class InputData:` header line is elided from this excerpt.
1118 The data is extracted from output.xml files generated by Jenkins jobs and
1119 stored in pandas' DataFrames.
1125 (as described in ExecutionChecker documentation)
1127 (as described in ExecutionChecker documentation)
1129 (as described in ExecutionChecker documentation)
# Constructor: keeps the specification and an (initially empty) pandas
# Series keyed job -> build -> {metadata, suites, tests}.
1132 def __init__(self, spec):
1135 :param spec: Specification.
1136 :type spec: Specification
1143 self._input_data = pd.Series()
1147 """Getter - Input data.
1149 :returns: Input data
1150 :rtype: pandas.Series
1152 return self._input_data
# Per-build accessors into the nested Series.
1154 def metadata(self, job, build):
1155 """Getter - metadata
1157 :param job: Job which metadata we want.
1158 :param build: Build which metadata we want.
1162 :rtype: pandas.Series
1165 return self.data[job][build][u"metadata"]
1167 def suites(self, job, build):
1170 :param job: Job which suites we want.
1171 :param build: Build which suites we want.
1175 :rtype: pandas.Series
# NOTE(review): suites() coerces build via str() while metadata()/tests()
# do not — presumably callers differ; confirm before unifying.
1178 return self.data[job][str(build)][u"suites"]
1180 def tests(self, job, build):
1183 :param job: Job which tests we want.
1184 :param build: Build which tests we want.
1188 :rtype: pandas.Series
1191 return self.data[job][build][u"tests"]
# Parse one downloaded output.xml with Robot's ExecutionResult and run the
# ExecutionChecker visitor over it; on DataError, log and (per the elided
# lines, presumably) return None.
1193 def _parse_tests(self, job, build, log):
1194 """Process data from robot output.xml file and return JSON structured
1197 :param job: The name of job which build output data will be processed.
1198 :param build: The build which output data will be processed.
1199 :param log: List of log messages.
1202 :type log: list of tuples (severity, msg)
1203 :returns: JSON data structure.
1212 with open(build[u"file-name"], u'r') as data_file:
1214 result = ExecutionResult(data_file)
1215 except errors.DataError as err:
1217 (u"ERROR", f"Error occurred while parsing output.xml: "
1221 checker = ExecutionChecker(metadata, self._cfg.mapping,
1223 result.visit(checker)
# Download one build's output file (with retries), parse it, delete the
# local file, optionally discard data outside the configured time period,
# then flush the collected log tuples through the logging module.
1227 def _download_and_parse_build(self, job, build, repeat, pid=10000):
1228 """Download and parse the input data file.
1230 :param pid: PID of the process executing this method.
1231 :param job: Name of the Jenkins job which generated the processed input
1233 :param build: Information about the Jenkins build which generated the
1234 processed input file.
1235 :param repeat: Repeat the download specified number of times if not
1246 (u"INFO", f"  Processing the job/build: {job}: {build[u'build']}")
1254 success = download_and_unzip_data_file(self._cfg, job, build, pid,
1262 f"It is not possible to download the input data file from the "
1263 f"job {job}, build {build[u'build']}, or it is damaged. "
1269 f"    Processing data from the build {build[u'build']} ...")
1271 data = self._parse_tests(job, build, logs)
1275 f"Input data file from the job {job}, build "
1276 f"{build[u'build']} is damaged. Skipped.")
1279 state = u"processed"
# Best-effort cleanup of the downloaded file; failures are only logged.
1282 remove(build[u"file-name"])
1283 except OSError as err:
1285 ("ERROR", f"Cannot remove the file {build[u'file-name']}: "
1289 # If the time-period is defined in the specification file, remove all
1290 # files which are outside the time period.
1291 timeperiod = self._cfg.input.get(u"time-period", None)
1292 if timeperiod and data:
1294 timeperiod = timedelta(int(timeperiod))
1295 metadata = data.get(u"metadata", None)
1297 generated = metadata.get(u"generated", None)
# Format must match the 14-char slice stored by _get_timestamp.
1299 generated = dt.strptime(generated, u"%Y%m%d %H:%M")
1300 if (now - generated) > timeperiod:
1301 # Remove the data and the file:
1306 f"    The build {job}/{build[u'build']} is "
1307 f"outdated, will be removed.")
1309 logs.append((u"INFO", u"  Done."))
# Replay buffered (severity, message) tuples into the real logger.
1311 for level, line in logs:
1312 if level == u"INFO":
1314 elif level == u"ERROR":
1316 elif level == u"DEBUG":
1318 elif level == u"CRITICAL":
1319 logging.critical(line)
1320 elif level == u"WARNING":
1321 logging.warning(line)
1323 return {u"data": data, u"state": state, u"job": job, u"build": build}
# Sequentially download+parse every configured job/build, convert each
# result's dicts into pandas Series, store them under
# self._input_data[job][build], and record file name/state in the spec.
1325 def download_and_parse_data(self, repeat=1):
1326 """Download the input data files, parse input data from input files and
1327 store in pandas' Series.
1329 :param repeat: Repeat the download specified number of times if not
1334 logging.info(u"Downloading and parsing input files ...")
1336 for job, builds in self._cfg.builds.items():
1337 for build in builds:
1339 result = self._download_and_parse_build(job, build, repeat)
1340 build_nr = result[u"build"][u"build"]
1343 data = result[u"data"]
1344 build_data = pd.Series({
1345 u"metadata": pd.Series(
1346 list(data[u"metadata"].values()),
1347 index=list(data[u"metadata"].keys())
1349 u"suites": pd.Series(
1350 list(data[u"suites"].values()),
1351 index=list(data[u"suites"].keys())
1353 u"tests": pd.Series(
1354 list(data[u"tests"].values()),
1355 index=list(data[u"tests"].keys())
# Lazily create the per-job Series on first build of that job.
1359 if self._input_data.get(job, None) is None:
1360 self._input_data[job] = pd.Series()
1361 self._input_data[job][str(build_nr)] = build_data
1363 self._cfg.set_input_file_name(
1364 job, build_nr, result[u"build"][u"file-name"])
1366 self._cfg.set_input_state(job, build_nr, result[u"state"])
# ru_maxrss is reported in kB on Linux; /1000 approximates MB.
1369 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
1370 logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
1372 logging.info(u"Done.")
# Static helpers used to turn a tag-filter expression into an eval()-able
# condition (decorator lines such as @staticmethod are elided here).
1375 def _end_of_tag(tag_filter, start=0, closer=u"'"):
1376 """Return the index of character in the string which is the end of tag.
1378 :param tag_filter: The string where the end of tag is being searched.
1379 :param start: The index where the searching is stated.
1380 :param closer: The character which is the tag closer.
1381 :type tag_filter: str
1384 :returns: The index of the tag closer.
# Find the opening quote from `start`, then return the matching closing
# quote's index; .index() raises ValueError when absent (handler elided).
1389 idx_opener = tag_filter.index(closer, start)
1390 return tag_filter.index(closer, idx_opener + 1)
1395 def _condition(tag_filter):
1396 """Create a conditional statement from the given tag filter.
1398 :param tag_filter: Filter based on tags from the element specification.
1399 :type tag_filter: str
1400 :returns: Conditional statement which can be evaluated.
# Rewrites each quoted tag into a "'tag' in tags" membership test
# (loop/termination lines are elided from this excerpt).
1406 index = InputData._end_of_tag(tag_filter, index)
1410 tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
# Filter tests/suites from the parsed input data by a tag-based filter
# expression; result is a nested pandas.Series keyed job -> build -> item.
1412 def filter_data(self, element, params=None, data=None, data_set=u"tests",
1413 continue_on_error=False):
1414 """Filter required data from the given jobs and builds.
1416 The output data structure is:
1420 - test (or suite) 1 ID:
1426 - test (or suite) n ID:
1433 :param element: Element which will use the filtered data.
1434 :param params: Parameters which will be included in the output. If None,
1435 all parameters are included.
1436 :param data: If not None, this data is used instead of data specified
1438 :param data_set: The set of data to be filtered: tests, suites,
1440 :param continue_on_error: Continue if there is error while reading the
1441 data. The Item will be empty then
1442 :type element: pandas.Series
1446 :type continue_on_error: bool
1447 :returns: Filtered data.
1448 :rtype: pandas.Series
# "all"/"template" filters select everything; otherwise compile the tag
# filter string into an evaluable condition.
1452 if element[u"filter"] in (u"all", u"template"):
1455 cond = InputData._condition(element[u"filter"])
1456 logging.debug(f"    Filter: {cond}")
1458 logging.error(u"    No filter defined.")
# Always keep "type" in the selected parameters so downstream consumers
# can distinguish test kinds.
1462 params = element.get(u"parameters", None)
1464 params.append(u"type")
# Explicit `data` overrides the job/build set from the element spec.
1466 data_to_filter = data if data else element[u"data"]
1469 for job, builds in data_to_filter.items():
1470 data[job] = pd.Series()
1471 for build in builds:
1472 data[job][str(build)] = pd.Series()
1475 self.data[job][str(build)][data_set].items())
1477 if continue_on_error:
# NOTE(review): eval() runs the generated filter expression -- the
# element specification must come from a trusted source.
1481 for test_id, test_data in data_dict.items():
1482 if eval(cond, {u"tags": test_data.get(u"tags", u"")}):
1483 data[job][str(build)][test_id] = pd.Series()
# params is None => copy every parameter of the matching item.
1485 for param, val in test_data.items():
1486 data[job][str(build)][test_id][param] = val
1488 for param in params:
1490 data[job][str(build)][test_id][param] =\
1493 data[job][str(build)][test_id][param] =\
1497 except (KeyError, IndexError, ValueError) as err:
1499 f"Missing mandatory parameter in the element specification: "
1503 except AttributeError as err:
1504 logging.error(repr(err))
# A SyntaxError here means the filter string itself is malformed.
1506 except SyntaxError as err:
1508 f"The filter {cond} is not correct. Check if all tags are "
1509 f"enclosed by apostrophes.\n{repr(err)}"
# Variant of filter_data(): selects tests by regex match against the test
# ID (the element's "include" list) instead of by tag expression.
1513 def filter_tests_by_name(self, element, params=None, data_set=u"tests",
1514 continue_on_error=False):
1515 """Filter required data from the given jobs and builds.
1517 The output data structure is:
1521 - test (or suite) 1 ID:
1527 - test (or suite) n ID:
1534 :param element: Element which will use the filtered data.
1535 :param params: Parameters which will be included in the output. If None,
1536 all parameters are included.
1537 :param data_set: The set of data to be filtered: tests, suites,
1539 :param continue_on_error: Continue if there is error while reading the
1540 data. The Item will be empty then
1541 :type element: pandas.Series
1544 :type continue_on_error: bool
1545 :returns: Filtered data.
1546 :rtype: pandas.Series
# Nothing to do when the element lists no test name patterns.
1549 include = element.get(u"include", None)
1551 logging.warning(u"No tests to include, skipping the element.")
# Always keep "type" among the selected parameters (see filter_data()).
1555 params = element.get(u"parameters", None)
1557 params.append(u"type")
1561 for job, builds in element[u"data"].items():
1562 data[job] = pd.Series()
1563 for build in builds:
1564 data[job][str(build)] = pd.Series()
1565 for test in include:
# Patterns and test IDs are compared case-insensitively by
# lower-casing both sides; re.match anchors at the start.
1567 reg_ex = re.compile(str(test).lower())
1568 for test_id in self.data[job][
1569 str(build)][data_set].keys():
1570 if re.match(reg_ex, str(test_id).lower()):
1571 test_data = self.data[job][
1572 str(build)][data_set][test_id]
1573 data[job][str(build)][test_id] = pd.Series()
# params is None => copy every parameter of the match.
1575 for param, val in test_data.items():
1576 data[job][str(build)][test_id]\
1579 for param in params:
1581 data[job][str(build)][
# Placeholder keeps the structure uniform when a
# requested parameter is absent for this test.
1585 data[job][str(build)][
1586 test_id][param] = u"No Data"
1587 except KeyError as err:
1588 logging.error(repr(err))
1589 if continue_on_error:
1594 except (KeyError, IndexError, ValueError) as err:
1596 f"Missing mandatory parameter in the element "
1597 f"specification: {repr(err)}"
1600 except AttributeError as err:
1601 logging.error(repr(err))
1605 def merge_data(data):
1606 """Merge data from more jobs and builds to a simple data structure.
1608 The output data structure is:
1610 - test (suite) 1 ID:
1616 - test (suite) n ID:
1619 :param data: Data to merge.
1620 :type data: pandas.Series
1621 :returns: Merged data.
1622 :rtype: pandas.Series
1625 logging.info(u" Merging data ...")
1627 merged_data = pd.Series()
1628 for builds in data.values:
1629 for item in builds.values:
1630 for item_id, item_data in item.items():
1631 merged_data[item_id] = item_data