-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
import logging
import re
+from difflib import SequenceMatcher
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from os.path import isdir
-from collections import OrderedDict
+from collections import OrderedDict, defaultdict
-from pal_utils import get_last_completed_build_number
from pal_errors import PresentationError
self._spec = spec
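+        # Unique error messages collected across the processed test sets;
+        # their list indices are used as references in the alert legend.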
+ self.error_msgs = list()
+
try:
self._spec_alert = spec.alerting
except KeyError as err:
- raise AlertingError(u"Alerting is not configured, skipped.",
- repr(err),
- u"WARNING")
+ raise AlertingError(
+ u"Alerting is not configured, skipped.", repr(err), u"WARNING"
+ )
self._path_failed_tests = spec.environment[u"paths"][u"DIR[STATIC,VPP]"]
:type sort: bool
:returns: CSIT build number, VPP version, Number of passed tests,
-            Number of failed tests, Compressed failed tests.
+            Number of failed tests, Duration, Compressed failed tests.
- :rtype: tuple(str, str, int, int, OrderedDict)
+ :rtype: tuple(str, str, int, int, str, OrderedDict)
"""
directory = self.configs[alert[u"way"]][u"output-dir"]
- failed_tests = OrderedDict()
+ failed_tests = defaultdict(dict)
file_path = f"{directory}/{test_set}.txt"
version = u""
try:
if idx == 3:
failed = line[:-1]
continue
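+            # The 5th line (idx 4) of the input file holds the run duration,
+            # assumed to be in milliseconds; convert it to HH:MM.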
+ if idx == 4:
+ minutes = int(line[:-1]) // 60000
+ duration = f"{(minutes // 60):02d}:{(minutes % 60):02d}"
+ continue
try:
- test = line[:-1].split(u'-')
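+                    # Each failed-test line carries the test identifier and
+                    # the error message separated by "###" (see split below).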
+ line, error_msg = line[:-1].split(u'###', maxsplit=1)
+ test = line.split(u'-')
name = u'-'.join(test[3:-1])
- except IndexError:
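+                    # Truncate long error messages at the first punctuation
+                    # mark or space found after the 128th character.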
+                    if len(error_msg) > 128:
+                        for sep in (u";", u":", u".", u"?", u"!", u",", u" "):
+                            if sep in error_msg[128:256]:
+                                error_msg = (
+                                    f"{error_msg[:128]}"
+                                    f"{error_msg[128:].split(sep, 1)[0]}..."
+                                )
+                                break
+                        else:
+                            error_msg = error_msg[:128]
+
+ except ValueError:
continue
- if failed_tests.get(name, None) is None:
- failed_tests[name] = dict(nics=list(),
- framesizes=list(),
- cores=list())
- if test[0] not in failed_tests[name][u"nics"]:
- failed_tests[name][u"nics"].append(test[0])
- if test[1] not in failed_tests[name][u"framesizes"]:
- failed_tests[name][u"framesizes"].append(test[1])
- if test[2] not in failed_tests[name][u"cores"]:
- failed_tests[name][u"cores"].append(test[2])
+
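+                    # Fuzzy-match against already recorded messages so that
+                    # similar errors (ratio > 0.5) share one legend entry.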
+ for e_msg in self.error_msgs:
+ if SequenceMatcher(None, e_msg,
+ error_msg).ratio() > 0.5:
+ error_msg = e_msg
+ break
+ if error_msg not in self.error_msgs:
+ self.error_msgs.append(error_msg)
+
+ error_msg_index = self.error_msgs.index(error_msg)
+
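+                    # Group the failures by test name and by the index of
+                    # the de-duplicated error message.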
+ if failed_tests.get(name, {}).get(error_msg_index) is None:
+ failed_tests[name][error_msg_index] = \
+ dict(nics=list(),
+ framesizes=list(),
+ cores=list())
+
+ if test[0] not in \
+ failed_tests[name][error_msg_index][u"nics"]:
+ failed_tests[name][error_msg_index][u"nics"].\
+ append(test[0])
+ if test[1] not in \
+ failed_tests[name][error_msg_index][u"framesizes"]:
+ failed_tests[name][error_msg_index][u"framesizes"].\
+ append(test[1])
+                    check_core = f"{test[2]}[{error_msg_index}]"
+                    if check_core not in \
+                            failed_tests[name][error_msg_index][u"cores"]:
+                        failed_tests[name][error_msg_index][u"cores"].append(
+                            check_core
+                        )
+
except IOError:
logging.error(f"No such file or directory: {file_path}")
- return None, None, None, None, None
+ return None, None, None, None, None, None
if sort:
sorted_failed_tests = OrderedDict()
for key in sorted(failed_tests.keys()):
sorted_failed_tests[key] = failed_tests[key]
- return build, version, passed, failed, sorted_failed_tests
+ return build, version, passed, failed, duration, sorted_failed_tests
- return build, version, passed, failed, failed_tests
+ return build, version, passed, failed, duration, failed_tests
def _list_gressions(self, alert, idx, header, re_pro):
"""Create a file with regressions or progressions for the test set
:param idx: Index of the test set as it is specified in the
specification file.
:param header: The header of the list of [re|pro]gressions.
- :param re_pro: 'regression' or 'progression'.
+ :param re_pro: 'regressions' or 'progressions'.
:type alert: dict
:type idx: int
:type header: str
)
text = u""
+
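+        # Write the legend line at the top of the regression and
+        # progression summary files.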
+        legend = (u"Legend: Test-name NIC Frame-size Trend[Mpps] Runs[#] "
+                  u"Long-Term change[%]")
+
+        for gression in (u"regressions", u"progressions"):
+            out_file = (
+                f"{self.configs[alert[u'way']][u'output-dir']}/"
+                f"trending-{gression}.txt"
+            )
+            try:
+                with open(out_file, u'w') as txt_file:
+                    txt_file.write(legend)
+            except IOError:
+                logging.error(f"Not possible to write the file {out_file}.")
+
for idx, test_set in enumerate(alert.get(u"include", list())):
test_set_short = u""
device = u""
try:
groups = re.search(
- re.compile(r'((vpp|dpdk)-\dn-(skx|clx|hsw|tsh|dnv)-.*)'),
+ re.compile(
+ r'((vpp|dpdk)-\dn-(skx|clx|tsh|dnv|zn2|tx2|icx|alt)-.*)'
+ ),
test_set
)
test_set_short = groups.group(1)
f"The test set {test_set} does not include information "
f"about test bed. Using empty string instead."
)
- build, version, passed, failed, failed_tests = \
+ build, version, passed, failed, duration, failed_tests = \
self._get_compressed_failed_tests(alert, test_set)
if build is None:
- ret_code, build_nr, _ = get_last_completed_build_number(
- self._spec.environment[u"urls"][u"URL[JENKINS,CSIT]"],
- alert[u"urls"][idx].split(u'/')[-1])
- if ret_code != 0:
- build_nr = u''
text += (
f"\n\nNo input data available for {test_set_short}. "
- f"See CSIT build {alert[u'urls'][idx]}/{build_nr} for more "
+ f"See CSIT job {alert[u'urls'][idx]} for more "
f"information.\n"
)
continue
text += (
- f"\n\n{test_set_short}, {failed} tests failed, {passed} tests "
- f"passed, CSIT build: {alert[u'urls'][idx]}/{build}, "
+ f"\n\n{test_set_short}, "
+ f"{failed} tests failed, "
+ f"{passed} tests passed, "
+ f"duration: {duration}, "
+ f"CSIT build: {alert[u'urls'][idx]}/{build}, "
f"{device} version: {version}\n\n"
)
max_len = MaxLens(0, 0, 0, 0)
- for name, params in failed_tests.items():
- failed_tests[name][u"nics"] = u",".join(sorted(params[u"nics"]))
- failed_tests[name][u"framesizes"] = \
- u",".join(sorted(params[u"framesizes"]))
- failed_tests[name][u"cores"] = \
- u",".join(sorted(params[u"cores"]))
- if len(name) > max_len.name:
- max_len.name = len(name)
- if len(failed_tests[name][u"nics"]) > max_len.nics:
- max_len.nics = len(failed_tests[name][u"nics"])
- if len(failed_tests[name][u"framesizes"]) > max_len.frmsizes:
- max_len.frmsizes = len(failed_tests[name][u"framesizes"])
- if len(failed_tests[name][u"cores"]) > max_len.cores:
- max_len.cores = len(failed_tests[name][u"cores"])
-
- for name, params in failed_tests.items():
- text += (
- f"{name + u' ' * (max_len.name - len(name))} "
- f"{params[u'nics']}"
- f"{u' ' * (max_len.nics - len(params[u'nics']))} "
- f"{params[u'framesizes']}"
- f"{u' ' * (max_len.frmsizes-len(params[u'framesizes']))} "
- f"{params[u'cores']}"
- f"{u' ' * (max_len.cores - len(params[u'cores']))}\n"
- )
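+            # First pass: join the collected values and find the widest
+            # entry in every column to align the report.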
+            for test, message in failed_tests.items():
+                for params in message.values():
+                    params[u"nics"] = u" ".join(sorted(params[u"nics"]))
+                    params[u"framesizes"] = \
+                        u" ".join(sorted(params[u"framesizes"]))
+                    params[u"cores"] = u" ".join(sorted(params[u"cores"]))
+                    max_len.name = max(max_len.name, len(test))
+                    max_len.nics = max(max_len.nics, len(params[u"nics"]))
+                    max_len.frmsizes = \
+                        max(max_len.frmsizes, len(params[u"framesizes"]))
+                    max_len.cores = max(max_len.cores, len(params[u"cores"]))
+
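+            # Second pass: emit one aligned row per error message; the test
+            # name is printed only on its first row.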
+            for test, message in failed_tests.items():
+                test_added = False
+                for params in message.values():
+                    if not test_added:
+                        test_added = True
+                    else:
+                        test = u""
+                    pad_name = u" " * (max_len.name - len(test))
+                    pad_nics = u" " * (max_len.nics - len(params[u"nics"]))
+                    pad_frmsizes = u" " * \
+                        (max_len.frmsizes - len(params[u"framesizes"]))
+                    pad_cores = u" " * (max_len.cores - len(params[u"cores"]))
+                    text += (
+                        f"{test}{pad_name} "
+                        f"{params[u'nics']}{pad_nics} "
+                        f"{params[u'framesizes']}{pad_frmsizes} "
+                        f"{params[u'cores']}{pad_cores}\n"
+                    )
gression_hdr = (
f"\n\n{test_set_short}, "
f"{self.configs[alert[u'way']][u'output-file']}"
logging.info(f"Writing the file {file_name}.txt ...")
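+        # Append the legend mapping error-message indices (used as core
+        # suffixes above) to the full messages.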
+        text += u"\n\nLegend:\n\n"
+
+        for e_msg_idx, e_msg in enumerate(self.error_msgs):
+            text += f"[{e_msg_idx}] - {e_msg}\n"
+
try:
with open(f"{file_name}.txt", u'w') as txt_file:
txt_file.write(text)