+ :param alert: Files are created for this alert.
+ :param test_set: Specifies which set of tests will be included in the
+ result. Its name is the same as the name of the file with failed tests.
+ :param sort: If True, the failed tests are sorted alphabetically.
+ :type alert: dict
+ :type test_set: str
+ :type sort: bool
+ :returns: CSIT build number, VPP version, Number of passed tests,
+ Number of failed tests, Duration of the test run, Compressed failed
+ tests.
+ :rtype: tuple(str, str, int, int, str, OrderedDict)
+ """
+
+ directory = self.configs[alert[u"way"]][u"output-dir"]
+ failed_tests = defaultdict(dict)
+ file_path = f"{directory}/{test_set}.txt"
+ version = u""
+ try:
+ with open(file_path, u'r') as f_txt:
+ for idx, line in enumerate(f_txt):
+ if idx == 0:
+ build = line[:-1]
+ continue
+ if idx == 1:
+ version = line[:-1]
+ continue
+ if idx == 2:
+ passed = line[:-1]
+ continue
+ if idx == 3:
+ failed = line[:-1]
+ continue
+ if idx == 4:
+ minutes = int(line[:-1]) // 60000
+ duration = f"{(minutes // 60):02d}:{(minutes % 60):02d}"
+ continue
+ try:
+ line, error_msg = line[:-1].split(u'###', maxsplit=1)
+ test = line.split(u'-')
+ name = u'-'.join(test[3:-1])
+ if len(error_msg) > 128:
+ if u";" in error_msg[128:256]:
+ error_msg = \
+ f"{error_msg[:128]}" \
+ f"{error_msg[128:].split(u';', 1)[0]}..."
+ elif u":" in error_msg[128:256]:
+ error_msg = \
+ f"{error_msg[:128]}" \
+ f"{error_msg[128:].split(u':', 1)[0]}..."
+ elif u"." in error_msg[128:256]:
+ error_msg = \
+ f"{error_msg[:128]}" \
+ f"{error_msg[128:].split(u'.', 1)[0]}..."
+ elif u"?" in error_msg[128:256]:
+ error_msg = \
+ f"{error_msg[:128]}" \
+ f"{error_msg[128:].split(u'?', 1)[0]}..."
+ elif u"!" in error_msg[128:256]:
+ error_msg = \
+ f"{error_msg[:128]}" \
+ f"{error_msg[128:].split(u'!', 1)[0]}..."
+ elif u"," in error_msg[128:256]:
+ error_msg = \
+ f"{error_msg[:128]}" \
+ f"{error_msg[128:].split(u',', 1)[0]}..."
+ elif u" " in error_msg[128:256]:
+ error_msg = \
+ f"{error_msg[:128]}" \
+ f"{error_msg[128:].split(u' ', 1)[0]}..."
+ else:
+ error_msg = error_msg[:128]
+
+ except ValueError:
+ continue
+
+ for e_msg in self.error_msgs:
+ if SequenceMatcher(None, e_msg,
+ error_msg).ratio() > 0.5:
+ error_msg = e_msg
+ break
+ if error_msg not in self.error_msgs:
+ self.error_msgs.append(error_msg)
+
+ error_msg_index = self.error_msgs.index(error_msg)
+
+ if failed_tests.get(name, {}).get(error_msg_index) is None:
+ failed_tests[name][error_msg_index] = \
+ dict(nics=list(),
+ framesizes=list(),
+ cores=list())
+
+ if test[0] not in \
+ failed_tests[name][error_msg_index][u"nics"]:
+ failed_tests[name][error_msg_index][u"nics"].\
+ append(test[0])
+ if test[1] not in \
+ failed_tests[name][error_msg_index][u"framesizes"]:
+ failed_tests[name][error_msg_index][u"framesizes"].\
+ append(test[1])
+ check_core = test[2] + f"[{str(error_msg_index)}]"
+ if check_core not in \
+ failed_tests[name][error_msg_index][u"cores"]:
+ failed_tests[name][error_msg_index][u"cores"].\
+ append(test[2] + "[" + str(error_msg_index) + "]")
+
+ except IOError:
+ logging.error(f"No such file or directory: {file_path}")
+ return None, None, None, None, None, None
+ if sort:
+ sorted_failed_tests = OrderedDict()
+ for key in sorted(failed_tests.keys()):
+ sorted_failed_tests[key] = failed_tests[key]
+ return build, version, passed, failed, duration, sorted_failed_tests
+
+ return build, version, passed, failed, duration, failed_tests
+
+ def _list_gressions(self, alert, idx, header, re_pro):
+ """Create a file with regressions or progressions for the test set
+ specified by idx.
+
+ :param alert: Files are created for this alert.
+ :param idx: Index of the test set as it is specified in the
+ specification file.
+ :param header: The header of the list of [re|pro]gressions.
+ :param re_pro: 'regressions' or 'progressions'.