X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_alerts.py;h=9a0a03a59fdd752b57da65de213d6b4a2e0ab032;hp=c6446f80d9023fbf3f64ae2496c1a516fb0e638d;hb=03bf0dd6ea67ef2b1386733d0b2ce3489c6a7f3e;hpb=d1753114ad2fa79eb7ad5db535418a6a765c919a

diff --git a/resources/tools/presentation/generator_alerts.py b/resources/tools/presentation/generator_alerts.py
index c6446f80d9..9a0a03a59f 100644
--- a/resources/tools/presentation/generator_alerts.py
+++ b/resources/tools/presentation/generator_alerts.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -22,12 +22,12 @@ import smtplib
 import logging
 import re
 
+from difflib import SequenceMatcher
 from email.mime.text import MIMEText
 from email.mime.multipart import MIMEMultipart
 from os.path import isdir
-from collections import OrderedDict
+from collections import OrderedDict, defaultdict
 
-from pal_utils import get_last_completed_build_number
 from pal_errors import PresentationError
 
 
@@ -81,12 +81,14 @@ class Alerting:
 
         self._spec = spec
 
+        self.error_msgs = list()
+
         try:
             self._spec_alert = spec.alerting
         except KeyError as err:
-            raise AlertingError(u"Alerting is not configured, skipped.",
-                                repr(err),
-                                u"WARNING")
+            raise AlertingError(
+                u"Alerting is not configured, skipped.", repr(err), u"WARNING"
+            )
 
         self._path_failed_tests = spec.environment[u"paths"][u"DIR[STATIC,VPP]"]
 
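The import block above pulls in difflib.SequenceMatcher, which the parsing hunk below uses to fold near-identical failure messages into one bucket before indexing them in the new self.error_msgs list. A minimal standalone sketch of that clustering step, with hypothetical messages and the same 0.5 similarity threshold:

    from difflib import SequenceMatcher

    error_msgs = list()
    for error_msg in (
        u"Timeout expired on node DUT1 after 30s",
        u"Timeout expired on node DUT2 after 30s",
        u"Unexpected reply from the VPP API",
    ):
        for e_msg in error_msgs:
            # Reuse the first already-seen message that is more than 50 %
            # similar, so small variations (node names, counters) collapse.
            if SequenceMatcher(None, e_msg, error_msg).ratio() > 0.5:
                error_msg = e_msg
                break
        if error_msg not in error_msgs:
            error_msgs.append(error_msg)

    print(error_msgs)  # the two timeout variants end up sharing one entry

The first message seen becomes the canonical text for its bucket, which is why the legend at the end of the alert prints the earliest variant of each error.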
@@ -247,11 +249,12 @@ class Alerting:
         :type sort: bool
         :returns: CSIT build number, VPP version, Number of passed tests,
-            Number of failed tests, Compressed failed tests.
-        :rtype: tuple(str, str, int, int, OrderedDict)
+            Number of failed tests, Duration, Compressed failed tests.
+        :rtype: tuple(str, str, int, int, str, OrderedDict)
         """
 
         directory = self.configs[alert[u"way"]][u"output-dir"]
-        failed_tests = OrderedDict()
+        failed_tests = defaultdict(dict)
         file_path = f"{directory}/{test_set}.txt"
         version = u""
+        duration = u""
         try:
@@ -269,33 +272,53 @@ class Alerting:
                 if idx == 3:
                     failed = line[:-1]
                     continue
+                if idx == 4:
+                    minutes = int(line[:-1]) // 60000
+                    duration = f"{(minutes // 60):02d}:{(minutes % 60):02d}"
+                    continue
                 try:
-                    test = line[:-1].split(u'-')
+                    line, error_msg = line[:-1].split(u'###', maxsplit=1)
+                    test = line.split(u'-')
                     name = u'-'.join(test[3:-1])
-                except IndexError:
+                except ValueError:
                     continue
-                if failed_tests.get(name, None) is None:
-                    failed_tests[name] = dict(nics=list(),
-                                              framesizes=list(),
-                                              cores=list())
-                if test[0] not in failed_tests[name][u"nics"]:
-                    failed_tests[name][u"nics"].append(test[0])
-                if test[1] not in failed_tests[name][u"framesizes"]:
-                    failed_tests[name][u"framesizes"].append(test[1])
-                if test[2] not in failed_tests[name][u"cores"]:
-                    failed_tests[name][u"cores"].append(test[2])
+
+                for e_msg in self.error_msgs:
+                    if SequenceMatcher(None, e_msg,
+                                       error_msg).ratio() > 0.5:
+                        error_msg = e_msg
+                        break
+                if error_msg not in self.error_msgs:
+                    self.error_msgs.append(error_msg)
+
+                error_msg_index = self.error_msgs.index(error_msg)
+
+                if error_msg_index not in failed_tests[name]:
+                    failed_tests[name][error_msg_index] = dict(
+                        nics=list(), framesizes=list(), cores=list()
+                    )
+                entry = failed_tests[name][error_msg_index]
+
+                if test[0] not in entry[u"nics"]:
+                    entry[u"nics"].append(test[0])
+                if test[1] not in entry[u"framesizes"]:
+                    entry[u"framesizes"].append(test[1])
+                check_core = f"{test[2]}[{error_msg_index}]"
+                if check_core not in entry[u"cores"]:
+                    entry[u"cores"].append(check_core)
 
         except IOError:
             logging.error(f"No such file or directory: {file_path}")
-            return None, None, None, None, None
+            return None, None, None, None, None, None
 
         if sort:
             sorted_failed_tests = OrderedDict()
             for key in sorted(failed_tests.keys()):
                 sorted_failed_tests[key] = failed_tests[key]
-            return build, version, passed, failed, sorted_failed_tests
+            return build, version, passed, failed, duration, \
+                sorted_failed_tests
 
-        return build, version, passed, failed, failed_tests
+        return build, version, passed, failed, duration, failed_tests
 
     def _list_gressions(self, alert, idx, header, re_pro):
         """Create a file with regressions or progressions for the test set
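With defaultdict(dict), failed_tests now maps a test name to one bucket per error-message index instead of a single flat record. A sketch of how one hypothetical input line lands in that structure, assuming the u'###' separator and field order the loop above expects:

    from collections import defaultdict

    failed_tests = defaultdict(dict)
    line = u"10ge2p1x710-64b-2t1c-ethip4-ip4base-ndrpdr###Timeout expired"

    line, error_msg = line.split(u'###', maxsplit=1)
    test = line.split(u'-')
    name = u'-'.join(test[3:-1])      # u"ethip4-ip4base"
    error_msg_index = 0               # position of error_msg in error_msgs

    failed_tests[name][error_msg_index] = dict(
        nics=[test[0]],               # u"10ge2p1x710"
        framesizes=[test[1]],         # u"64b"
        cores=[test[2] + f"[{error_msg_index}]"],  # u"2t1c[0]"
    )

The duration added to the return value follows the same arithmetic as the hunk: a run of 5400000 ms gives 5400000 // 60000 == 90 minutes, rendered as 01:30.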
@@ -348,34 +371,37 @@ class Alerting:
         text = u""
         for idx, test_set in enumerate(alert.get(u"include", list())):
+            test_set_short = u""
+            device = u""
             try:
-                test_set_short = re.search(
-                    re.compile(r'((vpp|dpdk)-\dn-(skx|clx|hsw|tsh|dnv)-.*)'),
+                groups = re.search(
+                    re.compile(
+                        r'((vpp|dpdk)-\dn-(skx|clx|tsh|dnv|zn2|tx2)-.*)'
+                    ),
                     test_set
-                ).group(1)
+                )
+                test_set_short = groups.group(1)
+                device = groups.group(2)
             except (AttributeError, IndexError):
                 logging.error(
                     f"The test set {test_set} does not include information "
                     f"about test bed. Using empty string instead."
                 )
-                test_set_short = u""
-            build, version, passed, failed, failed_tests = \
+            build, version, passed, failed, duration, failed_tests = \
                 self._get_compressed_failed_tests(alert, test_set)
             if build is None:
-                ret_code, build_nr, _ = get_last_completed_build_number(
-                    self._spec.environment[u"urls"][u"URL[JENKINS,CSIT]"],
-                    alert[u"urls"][idx].split(u'/')[-1])
-                if ret_code != 0:
-                    build_nr = u''
                 text += (
                     f"\n\nNo input data available for {test_set_short}. "
-                    f"See CSIT build {alert[u'urls'][idx]}/{build_nr} for more "
+                    f"See CSIT job {alert[u'urls'][idx]} for more "
                     f"information.\n"
                 )
                 continue
             text += (
-                f"\n\n{test_set_short}, {failed} tests failed, {passed} tests "
-                f"passed, CSIT build: {alert[u'urls'][idx]}/{build}, "
-                f"VPP version: {version}\n\n"
+                f"\n\n{test_set_short}, "
+                f"{failed} tests failed, "
+                f"{passed} tests passed, "
+                f"duration: {duration}, "
+                f"CSIT build: {alert[u'urls'][idx]}/{build}, "
+                f"{device} version: {version}\n\n"
             )
 
         class MaxLens():
@@ -398,36 +424,37 @@ class Alerting:
 
             max_len = MaxLens(0, 0, 0, 0)
 
-            for name, params in failed_tests.items():
-                failed_tests[name][u"nics"] = u",".join(sorted(params[u"nics"]))
-                failed_tests[name][u"framesizes"] = \
-                    u",".join(sorted(params[u"framesizes"]))
-                failed_tests[name][u"cores"] = \
-                    u",".join(sorted(params[u"cores"]))
-                if len(name) > max_len.name:
-                    max_len.name = len(name)
-                if len(failed_tests[name][u"nics"]) > max_len.nics:
-                    max_len.nics = len(failed_tests[name][u"nics"])
-                if len(failed_tests[name][u"framesizes"]) > max_len.frmsizes:
-                    max_len.frmsizes = len(failed_tests[name][u"framesizes"])
-                if len(failed_tests[name][u"cores"]) > max_len.cores:
-                    max_len.cores = len(failed_tests[name][u"cores"])
-
-            for name, params in failed_tests.items():
-                text += (
-                    f"{name + u' ' * (max_len.name - len(name))} "
-                    f"{params[u'nics']}"
-                    f"{u' ' * (max_len.nics - len(params[u'nics']))} "
-                    f"{params[u'framesizes']}"
-                    f"{u' ' * (max_len.frmsizes-len(params[u'framesizes']))} "
-                    f"{params[u'cores']}"
-                    f"{u' ' * (max_len.cores - len(params[u'cores']))}\n"
-                )
+            for test, message in failed_tests.items():
+                for e_message, params in message.items():
+                    params[u"nics"] = u" ".join(sorted(params[u"nics"]))
+                    params[u"framesizes"] = \
+                        u" ".join(sorted(params[u"framesizes"]))
+                    params[u"cores"] = u" ".join(sorted(params[u"cores"]))
+                    if len(test) > max_len.name:
+                        max_len.name = len(test)
+                    if len(params[u"nics"]) > max_len.nics:
+                        max_len.nics = len(params[u"nics"])
+                    if len(params[u"framesizes"]) > max_len.frmsizes:
+                        max_len.frmsizes = len(params[u"framesizes"])
+                    if len(params[u"cores"]) > max_len.cores:
+                        max_len.cores = len(params[u"cores"])
+
+            for test, message in failed_tests.items():
+                test_added = False
+                for e_message, params in message.items():
+                    name = test if not test_added else u""
+                    test_added = True
+                    text += (
+                        f"{name:<{max_len.name}} "
+                        f"{params[u'nics']:<{max_len.nics}} "
+                        f"{params[u'framesizes']:<{max_len.frmsizes}} "
+                        f"{params[u'cores']:<{max_len.cores}}\n"
+                    )
 
             gression_hdr = (
                 f"\n\n{test_set_short}, "
                 f"CSIT build: {alert[u'urls'][idx]}/{build}, "
-                f"VPP version: {version}\n\n"
+                f"{device} version: {version}\n\n"
            )
             # Add list of regressions:
             self._list_gressions(alert, idx, gression_hdr, u"regressions")
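The widened regex above now also recognises zn2 and tx2 test beds and keeps both the full match and the leading vpp/dpdk token, which later labels the version line. A quick check with a hypothetical test-set name that follows the CSIT naming scheme:

    import re

    test_set = u"report-iterative-2101-vpp-2n-zn2-mrr"
    groups = re.search(
        re.compile(r'((vpp|dpdk)-\dn-(skx|clx|tsh|dnv|zn2|tx2)-.*)'),
        test_set
    )
    test_set_short = groups.group(1)  # u"vpp-2n-zn2-mrr"
    device = groups.group(2)          # u"vpp"

When nothing matches, re.search() returns None, so groups.group(1) raises the AttributeError handled above, which logs the problem and leaves both fields empty.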
@@ -440,6 +467,11 @@ class Alerting:
             f"{self.configs[alert[u'way']][u'output-file']}"
         logging.info(f"Writing the file {file_name}.txt ...")
 
+        text += u"\n\nLegend:\n\n"
+
+        for idx, e_msg in enumerate(self.error_msgs):
+            text += f"[{idx}] - {e_msg}\n"
+
         try:
             with open(f"{file_name}.txt", u'w') as txt_file:
                 txt_file.write(text)
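Taken together, the grouping and the legend produce an alert body where each entry in the core column carries the index of its error bucket. A sketch of the rendered tail of the report, using hypothetical rows and messages:

    error_msgs = [
        u"Timeout expired on node DUT1 after 30s",
        u"Unexpected reply from the VPP API",
    ]
    text = u"ethip4-ip4base  x710  64b        2t1c[0] 4t2c[0]\n"
    text += u"l2bdbasemaclrn  x710  64b 1518b  2t1c[1]\n"
    text += u"\n\nLegend:\n\n"
    for idx, e_msg in enumerate(error_msgs):
        text += f"[{idx}] - {e_msg}\n"
    print(text)

The [0]/[1] suffixes on the core column point at the legend entries, so a reader can tie each failing combination back to its error message without repeating the full text on every row.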