X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=test%2Fframework.py;h=cd39e1b311c6674d40e0ec7db0510fe075d6e81b;hb=47f352710824c373f8130ba632022b47cb27505b;hp=affd4481912004963005e67e1b241bdafe721782;hpb=1043fd38d83434d6c4e002c5354a30027988fbfa;p=vpp.git diff --git a/test/framework.py b/test/framework.py index affd4481912..cd39e1b311c 100644 --- a/test/framework.py +++ b/test/framework.py @@ -1,85 +1,68 @@ #!/usr/bin/env python3 from __future__ import print_function -import gc +import logging import sys import os import select import signal +import subprocess import unittest -import tempfile +import re import time import faulthandler import random import copy -import psutil import platform +import shutil from collections import deque from threading import Thread, Event from inspect import getdoc, isclass from traceback import format_exception from logging import FileHandler, DEBUG, Formatter +from enum import Enum +from abc import ABC, abstractmethod +from struct import pack, unpack import scapy.compat -from scapy.packet import Raw +from scapy.packet import Raw, Packet +from config import config, available_cpus, num_cpus, max_vpp_cpus import hook as hookmodule from vpp_pg_interface import VppPGInterface from vpp_sub_interface import VppSubInterface from vpp_lo_interface import VppLoInterface from vpp_bvi_interface import VppBviInterface from vpp_papi_provider import VppPapiProvider +from vpp_papi import VppEnum import vpp_papi from vpp_papi.vpp_stats import VPPStats -from vpp_papi.vpp_transport_shmem import VppTransportShmemIOError -from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \ - get_logger, colorize +from vpp_papi.vpp_transport_socket import VppTransportSocketIOError +from log import ( + RED, + GREEN, + YELLOW, + double_line_delim, + single_line_delim, + get_logger, + colorize, +) from vpp_object import VppObjectRegistry from util import ppp, is_core_present from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest from scapy.layers.inet6 import ICMPv6EchoReply +from vpp_running import use_running +from test_result_code import TestResultCode -if os.name == 'posix' and sys.version_info[0] < 3: - # using subprocess32 is recommended by python official documentation - # @ https://docs.python.org/2/library/subprocess.html - import subprocess32 as subprocess -else: - import subprocess - -# Python2/3 compatible -try: - input = raw_input -except NameError: - pass - -PASS = 0 -FAIL = 1 -ERROR = 2 -SKIP = 3 -TEST_RUN = 4 - - -class BoolEnvironmentVariable(object): - - def __init__(self, env_var_name, default='n', true_values=None): - self.name = env_var_name - self.default = default - self.true_values = true_values if true_values is not None else \ - ("y", "yes", "1") - def __bool__(self): - return os.getenv(self.name, self.default).lower() in self.true_values +logger = logging.getLogger(__name__) - if sys.version_info[0] == 2: - __nonzero__ = __bool__ +# Set up an empty logger for the testcase that can be overridden as necessary +null_logger = logging.getLogger("VppTestCase") +null_logger.addHandler(logging.NullHandler()) - def __repr__(self): - return 'BoolEnvironmentVariable(%r, default=%r, true_values=%r)' % \ - (self.name, self.default, self.true_values) - -debug_framework = BoolEnvironmentVariable('TEST_DEBUG') -if debug_framework: +if config.debug_framework: import debug_internal """ @@ -91,10 +74,13 @@ if debug_framework: class VppDiedError(Exception): - """ exception for 
reporting that the subprocess has died.""" + """exception for reporting that the subprocess has died.""" - signals_by_value = {v: k for k, v in signal.__dict__.items() if - k.startswith('SIG') and not k.startswith('SIG_')} + signals_by_value = { + v: k + for k, v in signal.__dict__.items() + if k.startswith("SIG") and not k.startswith("SIG_") + } def __init__(self, rv=None, testcase=None, method_name=None): self.rv = rv @@ -108,15 +94,19 @@ class VppDiedError(Exception): pass if testcase is None and method_name is None: - in_msg = '' + in_msg = "" + else: + in_msg = " while running %s.%s" % (testcase, method_name) + + if self.rv: + msg = "VPP subprocess died unexpectedly%s with return code: %d%s." % ( + in_msg, + self.rv, + " [%s]" % (self.signal_name if self.signal_name is not None else ""), + ) else: - in_msg = 'running %s.%s ' % (testcase, method_name) + msg = "VPP subprocess died unexpectedly%s." % in_msg - msg = "VPP subprocess died %sunexpectedly with return code: %d%s." % ( - in_msg, - self.rv, - ' [%s]' % (self.signal_name if - self.signal_name is not None else '')) super(VppDiedError, self).__init__(msg) @@ -126,6 +116,7 @@ class _PacketInfo(object): Help process information about the next packet. Set variables to default values. """ + #: Store the index of the packet. index = -1 #: Store the index of the source packet generator interface of the packet. @@ -149,19 +140,25 @@ class _PacketInfo(object): def pump_output(testclass): - """ pump output from vpp stdout/stderr to proper queues """ + """pump output from vpp stdout/stderr to proper queues""" + if not hasattr(testclass, "vpp"): + return stdout_fragment = "" stderr_fragment = "" while not testclass.pump_thread_stop_flag.is_set(): - readable = select.select([testclass.vpp.stdout.fileno(), - testclass.vpp.stderr.fileno(), - testclass.pump_thread_wakeup_pipe[0]], - [], [])[0] + readable = select.select( + [ + testclass.vpp.stdout.fileno(), + testclass.vpp.stderr.fileno(), + testclass.pump_thread_wakeup_pipe[0], + ], + [], + [], + )[0] if testclass.vpp.stdout.fileno() in readable: read = os.read(testclass.vpp.stdout.fileno(), 102400) if len(read) > 0: - split = read.decode('ascii', - errors='backslashreplace').splitlines(True) + split = read.decode("ascii", errors="backslashreplace").splitlines(True) if len(stdout_fragment) > 0: split[0] = "%s%s" % (stdout_fragment, split[0]) if len(split) > 0 and split[-1].endswith("\n"): @@ -170,15 +167,13 @@ def pump_output(testclass): limit = -1 stdout_fragment = split[-1] testclass.vpp_stdout_deque.extend(split[:limit]) - if not testclass.cache_vpp_output: + if not config.cache_vpp_output: for line in split[:limit]: - testclass.logger.info( - "VPP STDOUT: %s" % line.rstrip("\n")) + testclass.logger.info("VPP STDOUT: %s" % line.rstrip("\n")) if testclass.vpp.stderr.fileno() in readable: read = os.read(testclass.vpp.stderr.fileno(), 102400) if len(read) > 0: - split = read.decode('ascii', - errors='backslashreplace').splitlines(True) + split = read.decode("ascii", errors="backslashreplace").splitlines(True) if len(stderr_fragment) > 0: split[0] = "%s%s" % (stderr_fragment, split[0]) if len(split) > 0 and split[-1].endswith("\n"): @@ -188,47 +183,47 @@ def pump_output(testclass): stderr_fragment = split[-1] testclass.vpp_stderr_deque.extend(split[:limit]) - if not testclass.cache_vpp_output: + if not config.cache_vpp_output: for line in split[:limit]: - testclass.logger.error( - "VPP STDERR: %s" % line.rstrip("\n")) + testclass.logger.error("VPP STDERR: %s" % line.rstrip("\n")) # ignoring the 
dummy pipe here intentionally - the # flag will take care of properly terminating the loop -def _is_skip_aarch64_set(): - return BoolEnvironmentVariable('SKIP_AARCH64') - - -is_skip_aarch64_set = _is_skip_aarch64_set() - - def _is_platform_aarch64(): - return platform.machine() == 'aarch64' + return platform.machine() == "aarch64" is_platform_aarch64 = _is_platform_aarch64() -def _running_extended_tests(): - return BoolEnvironmentVariable("EXTENDED_TESTS") +def _is_distro_ubuntu2204(): + with open("/etc/os-release") as f: + for line in f.readlines(): + if "jammy" in line: + return True + return False -running_extended_tests = _running_extended_tests() +is_distro_ubuntu2204 = _is_distro_ubuntu2204() -def _running_on_centos(): - os_id = os.getenv("OS_ID", "") - return True if "centos" in os_id.lower() else False +def _is_distro_debian11(): + with open("/etc/os-release") as f: + for line in f.readlines(): + if "bullseye" in line: + return True + return False -running_on_centos = _running_on_centos() +is_distro_debian11 = _is_distro_debian11() class KeepAliveReporter(object): """ Singleton object which reports test start to parent process """ + _shared_state = {} def __init__(self): @@ -249,26 +244,90 @@ class KeepAliveReporter(object): """ Write current test tmpdir & desc to keep-alive pipe to signal liveness """ - if self.pipe is None: + if not hasattr(test, "vpp") or self.pipe is None: # if not running forked.. return if isclass(test): - desc = '%s (%s)' % (desc, unittest.util.strclass(test)) + desc = "%s (%s)" % (desc, unittest.util.strclass(test)) else: desc = test.id() - self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid)) + self.pipe.send((desc, config.vpp, test.tempdir, test.vpp.pid)) + + +class TestCaseTag(Enum): + # marks the suites that must run at the end + # using only a single test runner + RUN_SOLO = 1 + # marks the suites broken on VPP multi-worker + FIXME_VPP_WORKERS = 2 + # marks the suites broken when ASan is enabled + FIXME_ASAN = 3 + # marks suites broken on Ubuntu-22.04 + FIXME_UBUNTU2204 = 4 + # marks suites broken on Debian-11 + FIXME_DEBIAN11 = 5 + # marks suites broken on debug vpp image + FIXME_VPP_DEBUG = 6 + + +def create_tag_decorator(e): + def decorator(cls): + try: + cls.test_tags.append(e) + except AttributeError: + cls.test_tags = [e] + return cls + + return decorator + +tag_run_solo = create_tag_decorator(TestCaseTag.RUN_SOLO) +tag_fixme_vpp_workers = create_tag_decorator(TestCaseTag.FIXME_VPP_WORKERS) +tag_fixme_asan = create_tag_decorator(TestCaseTag.FIXME_ASAN) +tag_fixme_ubuntu2204 = create_tag_decorator(TestCaseTag.FIXME_UBUNTU2204) +tag_fixme_debian11 = create_tag_decorator(TestCaseTag.FIXME_DEBIAN11) +tag_fixme_vpp_debug = create_tag_decorator(TestCaseTag.FIXME_VPP_DEBUG) -class VppTestCase(unittest.TestCase): + +class DummyVpp: + returncode = None + pid = 0xCAFEBAFE + + def poll(self): + pass + + def terminate(self): + pass + + +class CPUInterface(ABC): + cpus = [] + skipped_due_to_cpu_lack = False + + @classmethod + @abstractmethod + def get_cpus_required(cls): + pass + + @classmethod + def assign_cpus(cls, cpus): + cls.cpus = cpus + + +@use_running +class VppTestCase(CPUInterface, unittest.TestCase): """This subclass is a base class for VPP test cases that are implemented as classes. It provides methods to create and run test case. 
""" - extra_vpp_punt_config = [] + extra_vpp_statseg_config = "" + extra_vpp_config = [] extra_vpp_plugin_config = [] + logger = null_logger vapi_response_timeout = 5 + remove_configured_vpp_objects_on_tear_down = True @property def packet_infos(self): @@ -283,6 +342,44 @@ class VppTestCase(unittest.TestCase): else: return 0 + @classmethod + def has_tag(cls, tag): + """if the test case has a given tag - return true""" + try: + return tag in cls.test_tags + except AttributeError: + pass + return False + + @classmethod + def is_tagged_run_solo(cls): + """if the test case class is timing-sensitive - return true""" + return cls.has_tag(TestCaseTag.RUN_SOLO) + + @classmethod + def skip_fixme_asan(cls): + """if @tag_fixme_asan & ASan is enabled - mark for skip""" + if cls.has_tag(TestCaseTag.FIXME_ASAN): + vpp_extra_cmake_args = os.environ.get("VPP_EXTRA_CMAKE_ARGS", "") + if "DVPP_ENABLE_SANITIZE_ADDR=ON" in vpp_extra_cmake_args: + cls = unittest.skip("Skipping @tag_fixme_asan tests")(cls) + + @classmethod + def skip_fixme_ubuntu2204(cls): + """if distro is ubuntu 22.04 and @tag_fixme_ubuntu2204 mark for skip""" + if cls.has_tag(TestCaseTag.FIXME_UBUNTU2204): + cls = unittest.skip("Skipping @tag_fixme_ubuntu2204 tests")(cls) + + @classmethod + def skip_fixme_debian11(cls): + """if distro is Debian-11 and @tag_fixme_debian11 mark for skip""" + if cls.has_tag(TestCaseTag.FIXME_DEBIAN11): + cls = unittest.skip("Skipping @tag_fixme_debian11 tests")(cls) + + @classmethod + def skip_fixme_vpp_debug(cls): + cls = unittest.skip("Skipping @tag_fixme_vpp_debug tests")(cls) + @classmethod def instance(cls): """Return the instance of this testcase""" @@ -295,6 +392,7 @@ class VppTestCase(unittest.TestCase): cls.debug_gdb = False cls.debug_gdbserver = False cls.debug_all = False + cls.debug_attach = False if d is None: return dl = d.lower() @@ -304,99 +402,142 @@ class VppTestCase(unittest.TestCase): cls.debug_gdb = True elif dl == "gdbserver" or dl == "gdbserver-all": cls.debug_gdbserver = True + elif dl == "attach": + cls.debug_attach = True else: raise Exception("Unrecognized DEBUG option: '%s'" % d) if dl == "gdb-all" or dl == "gdbserver-all": cls.debug_all = True - @staticmethod - def get_least_used_cpu(): - cpu_usage_list = [set(range(psutil.cpu_count()))] - vpp_processes = [p for p in psutil.process_iter(attrs=['pid', 'name']) - if 'vpp_main' == p.info['name']] - for vpp_process in vpp_processes: - for cpu_usage_set in cpu_usage_list: - try: - cpu_num = vpp_process.cpu_num() - if cpu_num in cpu_usage_set: - cpu_usage_set_index = cpu_usage_list.index( - cpu_usage_set) - if cpu_usage_set_index == len(cpu_usage_list) - 1: - cpu_usage_list.append({cpu_num}) - else: - cpu_usage_list[cpu_usage_set_index + 1].add( - cpu_num) - cpu_usage_set.remove(cpu_num) - break - except psutil.NoSuchProcess: - pass - - for cpu_usage_set in cpu_usage_list: - if len(cpu_usage_set) > 0: - min_usage_set = cpu_usage_set - break + @classmethod + def get_vpp_worker_count(cls): + if not hasattr(cls, "vpp_worker_count"): + if cls.has_tag(TestCaseTag.FIXME_VPP_WORKERS): + cls.vpp_worker_count = 0 + else: + cls.vpp_worker_count = config.vpp_worker_count + return cls.vpp_worker_count - return random.choice(tuple(min_usage_set)) + @classmethod + def get_cpus_required(cls): + return 1 + cls.get_vpp_worker_count() @classmethod def setUpConstants(cls): - """ Set-up the test case class based on environment variables """ - cls.step = BoolEnvironmentVariable('STEP') - d = os.getenv("DEBUG", None) - # inverted case to handle '' == True - c 
= os.getenv("CACHE_OUTPUT", "1") - cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True - cls.set_debug_flags(d) - cls.vpp_bin = os.getenv('VPP_BIN', "vpp") - cls.plugin_path = os.getenv('VPP_PLUGIN_PATH') - cls.test_plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH') - cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS') - plugin_path = None - if cls.plugin_path is not None: - if cls.extern_plugin_path is not None: - plugin_path = "%s:%s" % ( - cls.plugin_path, cls.extern_plugin_path) - else: - plugin_path = cls.plugin_path - elif cls.extern_plugin_path is not None: - plugin_path = cls.extern_plugin_path + """Set-up the test case class based on environment variables""" + cls.step = config.step + cls.plugin_path = ":".join(config.vpp_plugin_dir) + cls.test_plugin_path = ":".join(config.vpp_test_plugin_dir) + cls.extern_plugin_path = ":".join(config.extern_plugin_dir) debug_cli = "" if cls.step or cls.debug_gdb or cls.debug_gdbserver: debug_cli = "cli-listen localhost:5002" - coredump_size = None - size = os.getenv("COREDUMP_SIZE") - if size is not None: - coredump_size = "coredump-size %s" % size - if coredump_size is None: + size = re.search(r"\d+[gG]", config.coredump_size) + if size: + coredump_size = f"coredump-size {config.coredump_size}".lower() + else: coredump_size = "coredump-size unlimited" - - cpu_core_number = cls.get_least_used_cpu() - if not hasattr(cls, "worker_config"): - cls.worker_config = "" - - cls.vpp_cmdline = [cls.vpp_bin, "unix", - "{", "nodaemon", debug_cli, "full-coredump", - coredump_size, "runtime-dir", cls.tempdir, "}", - "api-trace", "{", "on", "}", "api-segment", "{", - "prefix", cls.shm_prefix, "}", "cpu", "{", - "main-core", str(cpu_core_number), - cls.worker_config, "}", - "statseg", "{", "socket-name", cls.stats_sock, "}", - "socksvr", "{", "socket-name", cls.api_sock, "}", - "plugins", - "{", "plugin", "dpdk_plugin.so", "{", "disable", - "}", "plugin", "rdma_plugin.so", "{", "disable", - "}", "plugin", "unittest_plugin.so", "{", "enable", - "}"] + cls.extra_vpp_plugin_config + ["}", ] - if cls.extra_vpp_punt_config is not None: - cls.vpp_cmdline.extend(cls.extra_vpp_punt_config) - if plugin_path is not None: - cls.vpp_cmdline.extend(["plugin_path", plugin_path]) - if cls.test_plugin_path is not None: - cls.vpp_cmdline.extend(["test_plugin_path", cls.test_plugin_path]) - - cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline) - cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline)) + default_variant = config.variant + if default_variant is not None: + default_variant = "default { variant %s 100 }" % default_variant + else: + default_variant = "" + + api_fuzzing = config.api_fuzz + if api_fuzzing is None: + api_fuzzing = "off" + + cls.vpp_cmdline = [ + config.vpp, + "unix", + "{", + "nodaemon", + debug_cli, + "full-coredump", + coredump_size, + "runtime-dir", + cls.tempdir, + "}", + "api-trace", + "{", + "on", + "}", + "api-segment", + "{", + "prefix", + cls.get_api_segment_prefix(), + "}", + "cpu", + "{", + "main-core", + str(cls.cpus[0]), + ] + if cls.extern_plugin_path not in (None, ""): + cls.extra_vpp_plugin_config.append("add-path %s" % cls.extern_plugin_path) + if cls.get_vpp_worker_count(): + cls.vpp_cmdline.extend( + ["corelist-workers", ",".join([str(x) for x in cls.cpus[1:]])] + ) + cls.vpp_cmdline.extend( + [ + "}", + "physmem", + "{", + "max-size", + "32m", + "}", + "statseg", + "{", + "socket-name", + cls.get_stats_sock_path(), + cls.extra_vpp_statseg_config, + "}", + "socksvr", + "{", + "socket-name", + 
cls.get_api_sock_path(), + "}", + "node { ", + default_variant, + "}", + "api-fuzz {", + api_fuzzing, + "}", + "plugins", + "{", + "plugin", + "dpdk_plugin.so", + "{", + "disable", + "}", + "plugin", + "rdma_plugin.so", + "{", + "disable", + "}", + "plugin", + "lisp_unittest_plugin.so", + "{", + "enable", + "}", + "plugin", + "unittest_plugin.so", + "{", + "enable", + "}", + ] + + cls.extra_vpp_plugin_config + + [ + "}", + ] + ) + + if cls.extra_vpp_config is not None: + cls.vpp_cmdline.extend(cls.extra_vpp_config) + + if not cls.debug_attach: + cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline) + cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline)) @classmethod def wait_for_enter(cls): @@ -412,69 +553,73 @@ class VppTestCase(unittest.TestCase): print(single_line_delim) print("You can debug VPP using:") if cls.debug_gdbserver: - print("sudo gdb " + cls.vpp_bin + - " -ex 'target remote localhost:{port}'" - .format(port=cls.gdbserver_port)) - print("Now is the time to attach gdb by running the above " - "command, set up breakpoints etc., then resume VPP from " - "within gdb by issuing the 'continue' command") + print( + f"sudo gdb {config.vpp} " + f"-ex 'target remote localhost:{cls.gdbserver_port}'" + ) + print( + "Now is the time to attach gdb by running the above " + "command, set up breakpoints etc., then resume VPP from " + "within gdb by issuing the 'continue' command" + ) cls.gdbserver_port += 1 elif cls.debug_gdb: - print("sudo gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid) - print("Now is the time to attach gdb by running the above " - "command and set up breakpoints etc., then resume VPP from" - " within gdb by issuing the 'continue' command") + print(f"sudo gdb {config.vpp} -ex 'attach {cls.vpp.pid}'") + print( + "Now is the time to attach gdb by running the above " + "command and set up breakpoints etc., then resume VPP from" + " within gdb by issuing the 'continue' command" + ) print(single_line_delim) input("Press ENTER to continue running the testcase...") + @classmethod + def attach_vpp(cls): + cls.vpp = DummyVpp() + @classmethod def run_vpp(cls): + if ( + is_distro_ubuntu2204 == True and cls.has_tag(TestCaseTag.FIXME_UBUNTU2204) + ) or (is_distro_debian11 == True and cls.has_tag(TestCaseTag.FIXME_DEBIAN11)): + return + cls.logger.debug(f"Assigned cpus: {cls.cpus}") cmdline = cls.vpp_cmdline if cls.debug_gdbserver: - gdbserver = '/usr/bin/gdbserver' - if not os.path.isfile(gdbserver) or \ - not os.access(gdbserver, os.X_OK): - raise Exception("gdbserver binary '%s' does not exist or is " - "not executable" % gdbserver) - - cmdline = [gdbserver, 'localhost:{port}' - .format(port=cls.gdbserver_port)] + cls.vpp_cmdline + gdbserver = "/usr/bin/gdbserver" + if not os.path.isfile(gdbserver) or not os.access(gdbserver, os.X_OK): + raise Exception( + "gdbserver binary '%s' does not exist or is " + "not executable" % gdbserver + ) + + cmdline = [ + gdbserver, + "localhost:{port}".format(port=cls.gdbserver_port), + ] + cls.vpp_cmdline cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline)) try: - cls.vpp = subprocess.Popen(cmdline, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - bufsize=1) + cls.vpp = subprocess.Popen( + cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) except subprocess.CalledProcessError as e: - cls.logger.critical("Subprocess returned with non-0 return code: (" - "%s)", e.returncode) + cls.logger.critical( + "Subprocess returned with non-0 return code: (%s)", e.returncode + ) raise except OSError as e: - 
cls.logger.critical("Subprocess returned with OS error: " - "(%s) %s", e.errno, e.strerror) + cls.logger.critical( + "Subprocess returned with OS error: (%s) %s", e.errno, e.strerror + ) raise except Exception as e: - cls.logger.exception("Subprocess returned unexpected from " - "%s:", cmdline) + cls.logger.exception("Subprocess returned unexpected from %s:", cmdline) raise cls.wait_for_enter() - @classmethod - def wait_for_stats_socket(cls): - deadline = time.time() + 300 - ok = False - while time.time() < deadline or \ - cls.debug_gdb or cls.debug_gdbserver: - if os.path.exists(cls.stats_sock): - ok = True - break - cls.sleep(0.8) - if not ok: - cls.logger.critical("Couldn't stat : {}".format(cls.stats_sock)) - @classmethod def wait_for_coredump(cls): corefile = cls.tempdir + "/core" @@ -491,11 +636,46 @@ class VppTestCase(unittest.TestCase): ok = True break if not ok: - cls.logger.error("Timed out waiting for coredump to complete:" - " %s", corefile) + cls.logger.error( + "Timed out waiting for coredump to complete: %s", corefile + ) else: - cls.logger.error("Coredump complete: %s, size %d", - corefile, curr_size) + cls.logger.error("Coredump complete: %s, size %d", corefile, curr_size) + + @classmethod + def get_stats_sock_path(cls): + return "%s/stats.sock" % cls.tempdir + + @classmethod + def get_api_sock_path(cls): + return "%s/api.sock" % cls.tempdir + + @classmethod + def get_api_segment_prefix(cls): + return os.path.basename(cls.tempdir) # Only used for VAPI + + @classmethod + def get_tempdir(cls): + if cls.debug_attach: + tmpdir = f"{config.tmp_dir}/unittest-attach-gdb" + else: + tmpdir = f"{config.tmp_dir}/vpp-unittest-{cls.__name__}" + if config.wipe_tmp_dir: + shutil.rmtree(tmpdir, ignore_errors=True) + os.mkdir(tmpdir) + return tmpdir + + @classmethod + def create_file_handler(cls): + if config.log_dir is None: + cls.file_handler = FileHandler(f"{cls.tempdir}/log.txt") + return + + logdir = f"{config.log_dir}/vpp-unittest-{cls.__name__}" + if config.wipe_tmp_dir: + shutil.rmtree(logdir, ignore_errors=True) + os.mkdir(logdir) + cls.file_handler = FileHandler(f"{logdir}/log.txt") @classmethod def setUpClass(cls): @@ -504,34 +684,31 @@ class VppTestCase(unittest.TestCase): Remove shared memory files, start vpp and connect the vpp-api """ super(VppTestCase, cls).setUpClass() - gc.collect() # run garbage collection first cls.logger = get_logger(cls.__name__) - seed = os.environ["RND_SEED"] - random.seed(seed) - if hasattr(cls, 'parallel_handler'): + random.seed(config.rnd_seed) + if hasattr(cls, "parallel_handler"): cls.logger.addHandler(cls.parallel_handler) cls.logger.propagate = False - - cls.tempdir = tempfile.mkdtemp( - prefix='vpp-unittest-%s-' % cls.__name__) - cls.stats_sock = "%s/stats.sock" % cls.tempdir - cls.api_sock = "%s/api.sock" % cls.tempdir - cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir) + cls.set_debug_flags(config.debug) + cls.tempdir = cls.get_tempdir() + cls.create_file_handler() cls.file_handler.setFormatter( - Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s', - datefmt="%H:%M:%S")) + Formatter(fmt="%(asctime)s,%(msecs)03d %(message)s", datefmt="%H:%M:%S") + ) cls.file_handler.setLevel(DEBUG) cls.logger.addHandler(cls.file_handler) - cls.logger.debug("--- setUpClass() for %s called ---" % - cls.__name__) - cls.shm_prefix = os.path.basename(cls.tempdir) + cls.logger.debug("--- setUpClass() for %s called ---" % cls.__name__) os.chdir(cls.tempdir) - cls.logger.info("Temporary dir is %s, shm prefix is %s", - cls.tempdir, cls.shm_prefix) - 
cls.logger.debug("Random seed is %s" % seed) + cls.logger.info( + "Temporary dir is %s, api socket is %s", + cls.tempdir, + cls.get_api_sock_path(), + ) + cls.logger.debug("Random seed is %s", config.rnd_seed) cls.setUpConstants() cls.reset_packet_infos() - cls._captures = [] + cls._pcaps = [] + cls._old_pcaps = [] cls.verbose = 0 cls.vpp_dead = False cls.registry = VppObjectRegistry() @@ -540,98 +717,140 @@ class VppTestCase(unittest.TestCase): # need to catch exceptions here because if we raise, then the cleanup # doesn't get called and we might end with a zombie vpp try: - cls.run_vpp() - cls.reporter.send_keep_alive(cls, 'setUpClass') + if cls.debug_attach: + cls.attach_vpp() + else: + cls.run_vpp() + if not hasattr(cls, "vpp"): + return + cls.reporter.send_keep_alive(cls, "setUpClass") VppTestResult.current_test_case_info = TestCaseInfo( - cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin) + cls.logger, cls.tempdir, cls.vpp.pid, config.vpp + ) cls.vpp_stdout_deque = deque() cls.vpp_stderr_deque = deque() - cls.pump_thread_stop_flag = Event() - cls.pump_thread_wakeup_pipe = os.pipe() - cls.pump_thread = Thread(target=pump_output, args=(cls,)) - cls.pump_thread.daemon = True - cls.pump_thread.start() - if cls.debug_gdb or cls.debug_gdbserver: + # Pump thread in a non-debug-attached & not running-vpp + if not cls.debug_attach and not hasattr(cls, "running_vpp"): + cls.pump_thread_stop_flag = Event() + cls.pump_thread_wakeup_pipe = os.pipe() + cls.pump_thread = Thread(target=pump_output, args=(cls,)) + cls.pump_thread.daemon = True + cls.pump_thread.start() + if cls.debug_gdb or cls.debug_gdbserver or cls.debug_attach: cls.vapi_response_timeout = 0 - cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls, - cls.vapi_response_timeout) + cls.vapi = VppPapiProvider(cls.__name__, cls, cls.vapi_response_timeout) if cls.step: hook = hookmodule.StepHook(cls) else: hook = hookmodule.PollHook(cls) cls.vapi.register_hook(hook) - cls.wait_for_stats_socket() - cls.statistics = VPPStats(socketname=cls.stats_sock) + cls.statistics = VPPStats(socketname=cls.get_stats_sock_path()) try: hook.poll_vpp() except VppDiedError: cls.vpp_startup_failed = True cls.logger.critical( "VPP died shortly after startup, check the" - " output to standard error for possible cause") + " output to standard error for possible cause" + ) raise try: cls.vapi.connect() - except vpp_papi.VPPIOError as e: + except (vpp_papi.VPPIOError, Exception) as e: cls.logger.debug("Exception connecting to vapi: %s" % e) cls.vapi.disconnect() if cls.debug_gdbserver: - print(colorize("You're running VPP inside gdbserver but " - "VPP-API connection failed, did you forget " - "to 'continue' VPP from within gdb?", RED)) - raise + print( + colorize( + "You're running VPP inside gdbserver but " + "VPP-API connection failed, did you forget " + "to 'continue' VPP from within gdb?", + RED, + ) + ) + raise e + if cls.debug_attach: + last_line = cls.vapi.cli("show thread").split("\n")[-2] + cls.vpp_worker_count = int(last_line.split(" ")[0]) + print("Detected VPP with %s workers." 
% cls.vpp_worker_count) + except vpp_papi.VPPRuntimeError as e: + cls.logger.debug("%s" % e) + cls.quit() + raise e except Exception as e: cls.logger.debug("Exception connecting to VPP: %s" % e) - cls.quit() - raise + raise e + + @classmethod + def _debug_quit(cls): + if cls.debug_gdbserver or cls.debug_gdb: + try: + cls.vpp.poll() + + if cls.vpp.returncode is None: + print() + print(double_line_delim) + print("VPP or GDB server is still running") + print(single_line_delim) + input( + "When done debugging, press ENTER to kill the " + "process and finish running the testcase..." + ) + except AttributeError: + pass @classmethod def quit(cls): """ Disconnect vpp-api, kill vpp and cleanup shared memory files """ - if (cls.debug_gdbserver or cls.debug_gdb) and hasattr(cls, 'vpp'): - cls.vpp.poll() - if cls.vpp.returncode is None: - print() - print(double_line_delim) - print("VPP or GDB server is still running") - print(single_line_delim) - input("When done debugging, press ENTER to kill the " - "process and finish running the testcase...") + cls._debug_quit() + if hasattr(cls, "running_vpp"): + cls.vpp.quit_vpp() # first signal that we want to stop the pump thread, then wake it up - if hasattr(cls, 'pump_thread_stop_flag'): + if hasattr(cls, "pump_thread_stop_flag"): cls.pump_thread_stop_flag.set() - if hasattr(cls, 'pump_thread_wakeup_pipe'): - os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up') - if hasattr(cls, 'pump_thread'): + if hasattr(cls, "pump_thread_wakeup_pipe"): + os.write(cls.pump_thread_wakeup_pipe[1], b"ding dong wake up") + if hasattr(cls, "pump_thread"): cls.logger.debug("Waiting for pump thread to stop") cls.pump_thread.join() - if hasattr(cls, 'vpp_stderr_reader_thread'): - cls.logger.debug("Waiting for stdderr pump to stop") + if hasattr(cls, "vpp_stderr_reader_thread"): + cls.logger.debug("Waiting for stderr pump to stop") cls.vpp_stderr_reader_thread.join() - if hasattr(cls, 'vpp'): - if hasattr(cls, 'vapi'): - cls.logger.debug("Disconnecting class vapi client on %s", - cls.__name__) + if hasattr(cls, "vpp"): + if hasattr(cls, "vapi"): + cls.logger.debug(cls.vapi.vpp.get_stats()) + cls.logger.debug("Disconnecting class vapi client on %s", cls.__name__) cls.vapi.disconnect() - cls.logger.debug("Deleting class vapi attribute on %s", - cls.__name__) + cls.logger.debug("Deleting class vapi attribute on %s", cls.__name__) del cls.vapi cls.vpp.poll() - if cls.vpp.returncode is None: + if not cls.debug_attach and cls.vpp.returncode is None: cls.wait_for_coredump() cls.logger.debug("Sending TERM to vpp") cls.vpp.terminate() cls.logger.debug("Waiting for vpp to die") - cls.vpp.communicate() - cls.logger.debug("Deleting class vpp attribute on %s", - cls.__name__) - del cls.vpp + try: + outs, errs = cls.vpp.communicate(timeout=5) + except subprocess.TimeoutExpired: + cls.vpp.kill() + outs, errs = cls.vpp.communicate() + cls.logger.debug("Deleting class vpp attribute on %s", cls.__name__) + if not cls.debug_attach and not hasattr(cls, "running_vpp"): + cls.vpp.stdout.close() + cls.vpp.stderr.close() + # If vpp is a dynamic attribute set by the func use_running, + # deletion will result in an AttributeError that we can + # safetly pass. 
+ try: + del cls.vpp + except AttributeError: + pass if cls.vpp_startup_failed: stdout_log = cls.logger.info @@ -640,47 +859,51 @@ class VppTestCase(unittest.TestCase): stdout_log = cls.logger.info stderr_log = cls.logger.info - if hasattr(cls, 'vpp_stdout_deque'): + if hasattr(cls, "vpp_stdout_deque"): stdout_log(single_line_delim) - stdout_log('VPP output to stdout while running %s:', cls.__name__) + stdout_log("VPP output to stdout while running %s:", cls.__name__) stdout_log(single_line_delim) vpp_output = "".join(cls.vpp_stdout_deque) - with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f: + with open(cls.tempdir + "/vpp_stdout.txt", "w") as f: f.write(vpp_output) - stdout_log('\n%s', vpp_output) + stdout_log("\n%s", vpp_output) stdout_log(single_line_delim) - if hasattr(cls, 'vpp_stderr_deque'): + if hasattr(cls, "vpp_stderr_deque"): stderr_log(single_line_delim) - stderr_log('VPP output to stderr while running %s:', cls.__name__) + stderr_log("VPP output to stderr while running %s:", cls.__name__) stderr_log(single_line_delim) vpp_output = "".join(cls.vpp_stderr_deque) - with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f: + with open(cls.tempdir + "/vpp_stderr.txt", "w") as f: f.write(vpp_output) - stderr_log('\n%s', vpp_output) + stderr_log("\n%s", vpp_output) stderr_log(single_line_delim) @classmethod def tearDownClass(cls): - """ Perform final cleanup after running all tests in this test-case """ - cls.logger.debug("--- tearDownClass() for %s called ---" % - cls.__name__) - cls.reporter.send_keep_alive(cls, 'tearDownClass') + """Perform final cleanup after running all tests in this test-case""" + cls.logger.debug("--- tearDownClass() for %s called ---" % cls.__name__) + if not hasattr(cls, "vpp"): + return + cls.reporter.send_keep_alive(cls, "tearDownClass") cls.quit() cls.file_handler.close() cls.reset_packet_infos() - if debug_framework: + if config.debug_framework: debug_internal.on_tear_down_class(cls) def show_commands_at_teardown(self): - """ Allow subclass specific teardown logging additions.""" + """Allow subclass specific teardown logging additions.""" self.logger.info("--- No test specific show commands provided. 
---") def tearDown(self): - """ Show various debug prints after each test """ - self.logger.debug("--- tearDown() for %s.%s(%s) called ---" % - (self.__class__.__name__, self._testMethodName, - self._testMethodDoc)) + """Show various debug prints after each test""" + self.logger.debug( + "--- tearDown() for %s.%s(%s) called ---" + % (self.__class__.__name__, self._testMethodName, self._testMethodDoc) + ) + if not hasattr(self, "vpp"): + return try: if not self.vpp_dead: @@ -693,42 +916,45 @@ class VppTestCase(unittest.TestCase): self.logger.info(self.vapi.ppcli("show bihash")) self.logger.info("Logging testcase specific show commands.") self.show_commands_at_teardown() - self.registry.remove_vpp_config(self.logger) + if self.remove_configured_vpp_objects_on_tear_down: + self.registry.remove_vpp_config(self.logger) # Save/Dump VPP api trace log m = self._testMethodName api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid) tmp_api_trace = "/tmp/%s" % api_trace vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace) self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace)) - self.logger.info("Moving %s to %s\n" % (tmp_api_trace, - vpp_api_trace_log)) - os.rename(tmp_api_trace, vpp_api_trace_log) - self.logger.info(self.vapi.ppcli("api trace custom-dump %s" % - vpp_api_trace_log)) - except VppTransportShmemIOError: - self.logger.debug("VppTransportShmemIOError: Vpp dead. " - "Cannot log show commands.") + self.logger.info("Moving %s to %s\n" % (tmp_api_trace, vpp_api_trace_log)) + shutil.move(tmp_api_trace, vpp_api_trace_log) + except VppTransportSocketIOError: + self.logger.debug( + "VppTransportSocketIOError: Vpp dead. Cannot log show commands." + ) self.vpp_dead = True else: self.registry.unregister_all(self.logger) def setUp(self): - """ Clear trace before running each test""" + """Clear trace before running each test""" super(VppTestCase, self).setUp() + if not hasattr(self, "vpp"): + return self.reporter.send_keep_alive(self) if self.vpp_dead: - - raise VppDiedError(rv=None, testcase=self.__class__.__name__, - method_name=self._testMethodName) - self.sleep(.1, "during setUp") + raise VppDiedError( + rv=None, + testcase=self.__class__.__name__, + method_name=self._testMethodName, + ) + self.sleep(0.1, "during setUp") self.vpp_stdout_deque.append( - "--- test setUp() for %s.%s(%s) starts here ---\n" % - (self.__class__.__name__, self._testMethodName, - self._testMethodDoc)) + "--- test setUp() for %s.%s(%s) starts here ---\n" + % (self.__class__.__name__, self._testMethodName, self._testMethodDoc) + ) self.vpp_stderr_deque.append( - "--- test setUp() for %s.%s(%s) starts here ---\n" % - (self.__class__.__name__, self._testMethodName, - self._testMethodDoc)) + "--- test setUp() for %s.%s(%s) starts here ---\n" + % (self.__class__.__name__, self._testMethodName, self._testMethodDoc) + ) self.vapi.cli("clear trace") # store the test instance inside the test class - so that objects # holding the class can access instance methods (like assertEqual) @@ -749,18 +975,23 @@ class VppTestCase(unittest.TestCase): i.enable_capture() @classmethod - def register_capture(cls, cap_name): - """ Register a capture in the testclass """ + def register_pcap(cls, intf, worker): + """Register a pcap in the testclass""" # add to the list of captures with current timestamp - cls._captures.append((time.time(), cap_name)) + cls._pcaps.append((intf, worker)) @classmethod def get_vpp_time(cls): - return float(cls.vapi.cli('show clock').replace("Time now ", "")) + # processes e.g. 
"Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT" + # returns float("2.190522") + timestr = cls.vapi.cli("show clock") + head, sep, tail = timestr.partition(",") + head, sep, tail = head.partition("Time now") + return float(tail) @classmethod def sleep_on_vpp_time(cls, sec): - """ Sleep according to time in VPP world """ + """Sleep according to time in VPP world""" # On a busy system with many processes # we might end up with VPP time being slower than real world # So take that into account when waiting for VPP to do something @@ -769,26 +1000,32 @@ class VppTestCase(unittest.TestCase): cls.sleep(0.1) @classmethod - def pg_start(cls): - """ Enable the PG, wait till it is done, then clean up """ - cls.vapi.cli("trace add pg-input 1000") - cls.vapi.cli('packet-generator enable') + def pg_start(cls, trace=True): + """Enable the PG, wait till it is done, then clean up""" + for (intf, worker) in cls._old_pcaps: + intf.handle_old_pcap_file(intf.get_in_path(worker), intf.in_history_counter) + cls._old_pcaps = [] + if trace: + cls.vapi.cli("clear trace") + cls.vapi.cli("trace add pg-input 1000") + cls.vapi.cli("packet-generator enable") # PG, when starts, runs to completion - # so let's avoid a race condition, # and wait a little till it's done. # Then clean it up - and then be gone. deadline = time.time() + 300 - while cls.vapi.cli('show packet-generator').find("Yes") != -1: + while cls.vapi.cli("show packet-generator").find("Yes") != -1: cls.sleep(0.01) # yield if time.time() > deadline: cls.logger.error("Timeout waiting for pg to stop") break - for stamp, cap_name in cls._captures: - cls.vapi.cli('packet-generator delete %s' % cap_name) - cls._captures = [] + for intf, worker in cls._pcaps: + cls.vapi.cli("packet-generator delete %s" % intf.get_cap_name(worker)) + cls._old_pcaps = cls._pcaps + cls._pcaps = [] @classmethod - def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0): + def create_pg_interfaces_internal(cls, interfaces, gso=0, gso_size=0, mode=None): """ Create packet-generator interfaces. 
@@ -798,12 +1035,52 @@ class VppTestCase(unittest.TestCase): """ result = [] for i in interfaces: - intf = VppPGInterface(cls, i, gso, gso_size) + intf = VppPGInterface(cls, i, gso, gso_size, mode) setattr(cls, intf.name, intf) result.append(intf) cls.pg_interfaces = result return result + @classmethod + def create_pg_ip4_interfaces(cls, interfaces, gso=0, gso_size=0): + if not hasattr(cls, "vpp"): + cls.pg_interfaces = [] + return cls.pg_interfaces + pgmode = VppEnum.vl_api_pg_interface_mode_t + return cls.create_pg_interfaces_internal( + interfaces, gso, gso_size, pgmode.PG_API_MODE_IP4 + ) + + @classmethod + def create_pg_ip6_interfaces(cls, interfaces, gso=0, gso_size=0): + if not hasattr(cls, "vpp"): + cls.pg_interfaces = [] + return cls.pg_interfaces + pgmode = VppEnum.vl_api_pg_interface_mode_t + return cls.create_pg_interfaces_internal( + interfaces, gso, gso_size, pgmode.PG_API_MODE_IP6 + ) + + @classmethod + def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0): + if not hasattr(cls, "vpp"): + cls.pg_interfaces = [] + return cls.pg_interfaces + pgmode = VppEnum.vl_api_pg_interface_mode_t + return cls.create_pg_interfaces_internal( + interfaces, gso, gso_size, pgmode.PG_API_MODE_ETHERNET + ) + + @classmethod + def create_pg_ethernet_interfaces(cls, interfaces, gso=0, gso_size=0): + if not hasattr(cls, "vpp"): + cls.pg_interfaces = [] + return cls.pg_interfaces + pgmode = VppEnum.vl_api_pg_interface_mode_t + return cls.create_pg_interfaces_internal( + interfaces, gso, gso_size, pgmode.PG_API_MODE_ETHERNET + ) + @classmethod def create_loopback_interfaces(cls, count): """ @@ -812,6 +1089,9 @@ class VppTestCase(unittest.TestCase): :param count: number of interfaces created. :returns: List of created interfaces. """ + if not hasattr(cls, "vpp"): + cls.lo_interfaces = [] + return cls.lo_interfaces result = [VppLoInterface(cls) for i in range(count)] for intf in result: setattr(cls, intf.name, intf) @@ -826,6 +1106,9 @@ class VppTestCase(unittest.TestCase): :param count: number of interfaces created. :returns: List of created interfaces. """ + if not hasattr(cls, "vpp"): + cls.bvi_interfaces = [] + return cls.bvi_interfaces result = [VppBviInterface(cls) for i in range(count)] for intf in result: setattr(cls, intf.name, intf) @@ -833,7 +1116,7 @@ class VppTestCase(unittest.TestCase): return result @staticmethod - def extend_packet(packet, size, padding=' '): + def extend_packet(packet, size, padding=" "): """ Extend packet to given size by padding with spaces or custom padding NOTE: Currently works only when Raw layer is present. 
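
To show how a suite consumes the interface factories above, here is a hedged skeleton of a hypothetical test case; TestExample is made up, and admin_up(), config_ip4(), resolve_arp(), unconfig_ip4() and admin_down() are the usual VppPGInterface/VppInterface helpers defined outside this diff:

class TestExample(VppTestCase):
    """Hypothetical suite built on the pg-interface factories above"""

    @classmethod
    def setUpClass(cls):
        super(TestExample, cls).setUpClass()
        # two Ethernet-mode packet-generator interfaces: cls.pg0, cls.pg1
        cls.create_pg_interfaces(range(2))
        for i in cls.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.resolve_arp()

    @classmethod
    def tearDownClass(cls):
        for i in cls.pg_interfaces:
            i.unconfig_ip4()
            i.admin_down()
        super(TestExample, cls).tearDownClass()
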
@@ -851,7 +1134,7 @@ class VppTestCase(unittest.TestCase): @classmethod def reset_packet_infos(cls): - """ Reset the list of packet info objects and packet counts to zero """ + """Reset the list of packet info objects and packet counts to zero""" cls._packet_infos = {} cls._packet_count_for_dst_if_idx = {} @@ -891,11 +1174,12 @@ class VppTestCase(unittest.TestCase): :returns: string containing serialized data from packet info """ - return "%d %d %d %d %d" % (info.index, info.src, info.dst, - info.ip, info.proto) + + # retrieve payload, currently 18 bytes (4 x ints + 1 short) + return pack("iiiih", info.index, info.src, info.dst, info.ip, info.proto) @staticmethod - def payload_to_info(payload, payload_field='load'): + def payload_to_info(payload, payload_field="load"): """ Convert packet payload to _PacketInfo object @@ -907,13 +1191,17 @@ class VppTestCase(unittest.TestCase): :returns: _PacketInfo object containing de-serialized data from payload """ - numbers = getattr(payload, payload_field).split() + + # retrieve payload, currently 18 bytes (4 x ints + 1 short) + payload_b = getattr(payload, payload_field)[:18] + info = _PacketInfo() - info.index = int(numbers[0]) - info.src = int(numbers[1]) - info.dst = int(numbers[2]) - info.ip = int(numbers[3]) - info.proto = int(numbers[4]) + info.index, info.src, info.dst, info.ip, info.proto = unpack("iiiih", payload_b) + + # some SRv6 TCs depend on get an exception if bad values are detected + if info.index > 0x4000: + raise ValueError("Index value is invalid") + return info def get_next_packet_info(self, info): @@ -975,45 +1263,55 @@ class VppTestCase(unittest.TestCase): return try: msg = "Invalid %s: %d('%s') does not match expected value %d('%s')" - msg = msg % (getdoc(name_or_class).strip(), - real_value, str(name_or_class(real_value)), - expected_value, str(name_or_class(expected_value))) + msg = msg % ( + getdoc(name_or_class).strip(), + real_value, + str(name_or_class(real_value)), + expected_value, + str(name_or_class(expected_value)), + ) except Exception: msg = "Invalid %s: %s does not match expected value %s" % ( - name_or_class, real_value, expected_value) + name_or_class, + real_value, + expected_value, + ) self.assertEqual(real_value, expected_value, msg) - def assert_in_range(self, - real_value, - expected_min, - expected_max, - name=None): + def assert_in_range(self, real_value, expected_min, expected_max, name=None): if name is None: msg = None else: msg = "Invalid %s: %s out of range <%s,%s>" % ( - name, real_value, expected_min, expected_max) + name, + real_value, + expected_min, + expected_max, + ) self.assertTrue(expected_min <= real_value <= expected_max, msg) - def assert_packet_checksums_valid(self, packet, - ignore_zero_udp_checksums=True): + def assert_packet_checksums_valid(self, packet, ignore_zero_udp_checksums=True): received = packet.__class__(scapy.compat.raw(packet)) - udp_layers = ['UDP', 'UDPerror'] - checksum_fields = ['cksum', 'chksum'] + udp_layers = ["UDP", "UDPerror"] + checksum_fields = ["cksum", "chksum"] checksums = [] counter = 0 temp = received.__class__(scapy.compat.raw(received)) while True: layer = temp.getlayer(counter) if layer: + layer = layer.copy() + layer.remove_payload() for cf in checksum_fields: if hasattr(layer, cf): - if ignore_zero_udp_checksums and \ - 0 == getattr(layer, cf) and \ - layer.name in udp_layers: + if ( + ignore_zero_udp_checksums + and 0 == getattr(layer, cf) + and layer.name in udp_layers + ): continue - delattr(layer, cf) + delattr(temp.getlayer(counter), cf) 
checksums.append((counter, cf)) else: break @@ -1021,74 +1319,94 @@ class VppTestCase(unittest.TestCase): if 0 == len(checksums): return temp = temp.__class__(scapy.compat.raw(temp)) - for layer, cf in checksums: + for layer, cf in reversed(checksums): calc_sum = getattr(temp[layer], cf) self.assert_equal( - getattr(received[layer], cf), calc_sum, - "packet checksum on layer #%d: %s" % (layer, temp[layer].name)) + getattr(received[layer], cf), + calc_sum, + "packet checksum on layer #%d: %s" % (layer, temp[layer].name), + ) self.logger.debug( - "Checksum field `%s` on `%s` layer has correct value `%s`" % - (cf, temp[layer].name, calc_sum)) - - def assert_checksum_valid(self, received_packet, layer, - field_name='chksum', - ignore_zero_checksum=False): - """ Check checksum of received packet on given layer """ + "Checksum field `%s` on `%s` layer has correct value `%s`" + % (cf, temp[layer].name, calc_sum) + ) + + def assert_checksum_valid( + self, + received_packet, + layer, + checksum_field_names=["chksum", "cksum"], + ignore_zero_checksum=False, + ): + """Check checksum of received packet on given layer""" + layer_copy = received_packet[layer].copy() + layer_copy.remove_payload() + field_name = None + for f in checksum_field_names: + if hasattr(layer_copy, f): + field_name = f + break + if field_name is None: + raise Exception( + f"Layer `{layer}` has none of checksum fields: `{checksum_field_names}`." + ) received_packet_checksum = getattr(received_packet[layer], field_name) if ignore_zero_checksum and 0 == received_packet_checksum: return - recalculated = received_packet.__class__( - scapy.compat.raw(received_packet)) + recalculated = received_packet.__class__(scapy.compat.raw(received_packet)) delattr(recalculated[layer], field_name) recalculated = recalculated.__class__(scapy.compat.raw(recalculated)) - self.assert_equal(received_packet_checksum, - getattr(recalculated[layer], field_name), - "packet checksum on layer: %s" % layer) - - def assert_ip_checksum_valid(self, received_packet, - ignore_zero_checksum=False): - self.assert_checksum_valid(received_packet, 'IP', - ignore_zero_checksum=ignore_zero_checksum) - - def assert_tcp_checksum_valid(self, received_packet, - ignore_zero_checksum=False): - self.assert_checksum_valid(received_packet, 'TCP', - ignore_zero_checksum=ignore_zero_checksum) - - def assert_udp_checksum_valid(self, received_packet, - ignore_zero_checksum=True): - self.assert_checksum_valid(received_packet, 'UDP', - ignore_zero_checksum=ignore_zero_checksum) + self.assert_equal( + received_packet_checksum, + getattr(recalculated[layer], field_name), + f"packet checksum (field: {field_name}) on layer: %s" % layer, + ) + + def assert_ip_checksum_valid(self, received_packet, ignore_zero_checksum=False): + self.assert_checksum_valid( + received_packet, "IP", ignore_zero_checksum=ignore_zero_checksum + ) + + def assert_tcp_checksum_valid(self, received_packet, ignore_zero_checksum=False): + self.assert_checksum_valid( + received_packet, "TCP", ignore_zero_checksum=ignore_zero_checksum + ) + + def assert_udp_checksum_valid(self, received_packet, ignore_zero_checksum=True): + self.assert_checksum_valid( + received_packet, "UDP", ignore_zero_checksum=ignore_zero_checksum + ) def assert_embedded_icmp_checksum_valid(self, received_packet): if received_packet.haslayer(IPerror): - self.assert_checksum_valid(received_packet, 'IPerror') + self.assert_checksum_valid(received_packet, "IPerror") if received_packet.haslayer(TCPerror): - self.assert_checksum_valid(received_packet, 
'TCPerror') + self.assert_checksum_valid(received_packet, "TCPerror") if received_packet.haslayer(UDPerror): - self.assert_checksum_valid(received_packet, 'UDPerror', - ignore_zero_checksum=True) + self.assert_checksum_valid( + received_packet, "UDPerror", ignore_zero_checksum=True + ) if received_packet.haslayer(ICMPerror): - self.assert_checksum_valid(received_packet, 'ICMPerror') + self.assert_checksum_valid(received_packet, "ICMPerror") def assert_icmp_checksum_valid(self, received_packet): - self.assert_checksum_valid(received_packet, 'ICMP') + self.assert_checksum_valid(received_packet, "ICMP") self.assert_embedded_icmp_checksum_valid(received_packet) def assert_icmpv6_checksum_valid(self, pkt): if pkt.haslayer(ICMPv6DestUnreach): - self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum') + self.assert_checksum_valid(pkt, "ICMPv6DestUnreach") self.assert_embedded_icmp_checksum_valid(pkt) if pkt.haslayer(ICMPv6EchoRequest): - self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum') + self.assert_checksum_valid(pkt, "ICMPv6EchoRequest") if pkt.haslayer(ICMPv6EchoReply): - self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum') + self.assert_checksum_valid(pkt, "ICMPv6EchoReply") - def get_packet_counter(self, counter): + def get_counter(self, counter): if counter.startswith("/"): counter_value = self.statistics.get_counter(counter) else: - counters = self.vapi.cli("sh errors").split('\n') + counters = self.vapi.cli("sh errors").split("\n") counter_value = 0 for i in range(1, len(counters) - 1): results = counters[i].split() @@ -1097,15 +1415,23 @@ class VppTestCase(unittest.TestCase): break return counter_value + def assert_counter_equal(self, counter, expected_value, thread=None, index=0): + c = self.get_counter(counter) + if thread is not None: + c = c[thread][index] + else: + c = sum(x[index] for x in c) + self.assert_equal(c, expected_value, "counter `%s'" % counter) + def assert_packet_counter_equal(self, counter, expected_value): - counter_value = self.get_packet_counter(counter) - self.assert_equal(counter_value, expected_value, - "packet counter `%s'" % counter) + counter_value = self.get_counter(counter) + self.assert_equal( + counter_value, expected_value, "packet counter `%s'" % counter + ) def assert_error_counter_equal(self, counter, expected_value): - counter_value = self.statistics.get_err_counter(counter) - self.assert_equal(counter_value, expected_value, - "error counter `%s'" % counter) + counter_value = self.statistics[counter].sum() + self.assert_equal(counter_value, expected_value, "error counter `%s'" % counter) @classmethod def sleep(cls, timeout, remark=None): @@ -1116,49 +1442,156 @@ class VppTestCase(unittest.TestCase): # https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892 # noqa if timeout == 0: # yield quantum - if hasattr(os, 'sched_yield'): + if hasattr(os, "sched_yield"): os.sched_yield() else: time.sleep(0) return - if hasattr(cls, 'logger'): - cls.logger.debug("Starting sleep for %es (%s)", timeout, remark) + cls.logger.debug("Starting sleep for %es (%s)", timeout, remark) before = time.time() time.sleep(timeout) after = time.time() - if hasattr(cls, 'logger') and after - before > 2 * timeout: - cls.logger.error("unexpected self.sleep() result - " - "slept for %es instead of ~%es!", - after - before, timeout) - if hasattr(cls, 'logger'): - cls.logger.debug( - "Finished sleep (%s) - slept %es (wanted %es)", - remark, after - before, timeout) - - def pg_send(self, intf, pkts): - 
self.vapi.cli("clear trace") - intf.add_stream(pkts) + if after - before > 2 * timeout: + cls.logger.error( + "unexpected self.sleep() result - slept for %es instead of ~%es!", + after - before, + timeout, + ) + + cls.logger.debug( + "Finished sleep (%s) - slept %es (wanted %es)", + remark, + after - before, + timeout, + ) + + def virtual_sleep(self, timeout, remark=None): + self.logger.debug("Moving VPP time by %s (%s)", timeout, remark) + self.vapi.cli("set clock adjust %s" % timeout) + + def pg_send(self, intf, pkts, worker=None, trace=True): + intf.add_stream(pkts, worker=worker) self.pg_enable_capture(self.pg_interfaces) - self.pg_start() + self.pg_start(trace=trace) + + def snapshot_stats(self, stats_diff): + """Return snapshot of interesting stats based on diff dictionary.""" + stats_snapshot = {} + for sw_if_index in stats_diff: + for counter in stats_diff[sw_if_index]: + stats_snapshot[counter] = self.statistics[counter] + self.logger.debug(f"Took statistics stats_snapshot: {stats_snapshot}") + return stats_snapshot + + def compare_stats_with_snapshot(self, stats_diff, stats_snapshot): + """Assert appropriate difference between current stats and snapshot.""" + for sw_if_index in stats_diff: + for cntr, diff in stats_diff[sw_if_index].items(): + if sw_if_index == "err": + self.assert_equal( + self.statistics[cntr].sum(), + stats_snapshot[cntr].sum() + diff, + f"'{cntr}' counter value (previous value: " + f"{stats_snapshot[cntr].sum()}, " + f"expected diff: {diff})", + ) + else: + try: + self.assert_equal( + self.statistics[cntr][:, sw_if_index].sum(), + stats_snapshot[cntr][:, sw_if_index].sum() + diff, + f"'{cntr}' counter value (previous value: " + f"{stats_snapshot[cntr][:, sw_if_index].sum()}, " + f"expected diff: {diff})", + ) + except IndexError as e: + # if diff is 0, then this most probably a case where + # test declares multiple interfaces but traffic hasn't + # passed through this one yet - which means the counter + # value is 0 and can be ignored + if 0 != diff: + raise Exception( + f"Couldn't sum counter: {cntr} on sw_if_index: {sw_if_index}" + ) from e + + def send_and_assert_no_replies( + self, intf, pkts, remark="", timeout=None, stats_diff=None, trace=True, msg=None + ): + if stats_diff: + stats_snapshot = self.snapshot_stats(stats_diff) - def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None): self.pg_send(intf, pkts) - if not timeout: - timeout = 1 - for i in self.pg_interfaces: - i.get_capture(0, timeout=timeout) - i.assert_nothing_captured(remark=remark) - timeout = 0.1 - def send_and_expect(self, intf, pkts, output, n_rx=None): + try: + if not timeout: + timeout = 1 + for i in self.pg_interfaces: + i.assert_nothing_captured(timeout=timeout, remark=remark) + timeout = 0.1 + finally: + if trace: + if msg: + self.logger.debug(f"send_and_assert_no_replies: {msg}") + self.logger.debug(self.vapi.cli("show trace")) + + if stats_diff: + self.compare_stats_with_snapshot(stats_diff, stats_snapshot) + + def send_and_expect( + self, + intf, + pkts, + output, + n_rx=None, + worker=None, + trace=True, + msg=None, + stats_diff=None, + ): + if stats_diff: + stats_snapshot = self.snapshot_stats(stats_diff) + if not n_rx: - n_rx = len(pkts) - self.pg_send(intf, pkts) + n_rx = 1 if isinstance(pkts, Packet) else len(pkts) + self.pg_send(intf, pkts, worker=worker, trace=trace) rx = output.get_capture(n_rx) + if trace: + if msg: + self.logger.debug(f"send_and_expect: {msg}") + self.logger.debug(self.vapi.cli("show trace")) + + if stats_diff: + 
self.compare_stats_with_snapshot(stats_diff, stats_snapshot) + + return rx + + def send_and_expect_load_balancing( + self, input, pkts, outputs, worker=None, trace=True + ): + self.pg_send(input, pkts, worker=worker, trace=trace) + rxs = [] + for oo in outputs: + rx = oo._get_capture(1) + self.assertNotEqual(0, len(rx)) + rxs.append(rx) + if trace: + self.logger.debug(self.vapi.cli("show trace")) + return rxs + + def send_and_expect_some(self, intf, pkts, output, worker=None, trace=True): + self.pg_send(intf, pkts, worker=worker, trace=trace) + rx = output._get_capture(1) + if trace: + self.logger.debug(self.vapi.cli("show trace")) + self.assertTrue(len(rx) > 0) + self.assertTrue(len(rx) < len(pkts)) return rx - def send_and_expect_only(self, intf, pkts, output, timeout=None): + def send_and_expect_only(self, intf, pkts, output, timeout=None, stats_diff=None): + if stats_diff: + stats_snapshot = self.snapshot_stats(stats_diff) + self.pg_send(intf, pkts) rx = output.get_capture(len(pkts)) outputs = [output] @@ -1166,16 +1599,13 @@ class VppTestCase(unittest.TestCase): timeout = 1 for i in self.pg_interfaces: if i not in outputs: - i.get_capture(0, timeout=timeout) - i.assert_nothing_captured() + i.assert_nothing_captured(timeout=timeout) timeout = 0.1 - return rx + if stats_diff: + self.compare_stats_with_snapshot(stats_diff, stats_snapshot) - def runTest(self): - """ unittest calls runTest when TestCase is instantiated without a - test case. Use case: Writing unittests against VppTestCase""" - pass + return rx def get_testcase_doc_name(test): @@ -1218,8 +1648,7 @@ class VppTestResult(unittest.TestResult): core_crash_test_cases_info = set() current_test_case_info = None - def __init__(self, stream=None, descriptions=None, verbosity=None, - runner=None): + def __init__(self, stream=None, descriptions=None, verbosity=None, runner=None): """ :param stream File descriptor to store where to report test results. Set to the standard error stream by default. 
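
As a usage note for the stats_diff plumbing in send_and_expect()/send_and_expect_only() above: the dictionary is keyed by sw_if_index (or the literal "err" for error counters) and maps counter names to the expected delta. A hedged fragment of a call site inside a test method; the counter names and packet list are illustrative only:

    # pkts is any scapy packet list prepared by the test
    rx = self.send_and_expect(
        self.pg0,
        pkts,
        self.pg1,
        stats_diff={
            self.pg0.sw_if_index: {"/if/rx": len(pkts)},  # delta expected on pg0
            self.pg1.sw_if_index: {"/if/tx": len(pkts)},  # delta expected on pg1
        },
    )
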
@@ -1231,8 +1660,10 @@ class VppTestResult(unittest.TestResult): self.stream = stream self.descriptions = descriptions self.verbosity = verbosity + self.result_code = TestResultCode.TEST_RUN self.result_string = None self.runner = runner + self.printed = [] def addSuccess(self, test): """ @@ -1241,15 +1672,25 @@ class VppTestResult(unittest.TestResult): :param test: """ - if self.current_test_case_info: - self.current_test_case_info.logger.debug( - "--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__, - test._testMethodName, - test._testMethodDoc)) + self.log_result("addSuccess", test) unittest.TestResult.addSuccess(self, test) self.result_string = colorize("OK", GREEN) - - self.send_result_through_pipe(test, PASS) + self.result_code = TestResultCode.PASS + self.send_result_through_pipe(test, self.result_code) + + def addExpectedFailure(self, test, err): + self.log_result("addExpectedFailure", test, err) + super().addExpectedFailure(test, err) + self.result_string = colorize("FAIL", GREEN) + self.result_code = TestResultCode.EXPECTED_FAIL + self.send_result_through_pipe(test, self.result_code) + + def addUnexpectedSuccess(self, test): + self.log_result("addUnexpectedSuccess", test) + super().addUnexpectedSuccess(test) + self.result_string = colorize("OK", RED) + self.result_code = TestResultCode.UNEXPECTED_PASS + self.send_result_through_pipe(test, self.result_code) def addSkip(self, test, reason): """ @@ -1259,78 +1700,86 @@ class VppTestResult(unittest.TestResult): :param reason: """ - if self.current_test_case_info: - self.current_test_case_info.logger.debug( - "--- addSkip() %s.%s(%s) called, reason is %s" % - (test.__class__.__name__, test._testMethodName, - test._testMethodDoc, reason)) + self.log_result("addSkip", test, reason=reason) unittest.TestResult.addSkip(self, test, reason) self.result_string = colorize("SKIP", YELLOW) - self.send_result_through_pipe(test, SKIP) + if reason == "not enough cpus": + self.result_code = TestResultCode.SKIP_CPU_SHORTAGE + else: + self.result_code = TestResultCode.SKIP + self.send_result_through_pipe(test, self.result_code) def symlink_failed(self): if self.current_test_case_info: try: - failed_dir = os.getenv('FAILED_DIR') + failed_dir = config.failed_dir link_path = os.path.join( failed_dir, - '%s-FAILED' % - os.path.basename(self.current_test_case_info.tempdir)) - if self.current_test_case_info.logger: - self.current_test_case_info.logger.debug( - "creating a link to the failed test") - self.current_test_case_info.logger.debug( - "os.symlink(%s, %s)" % - (self.current_test_case_info.tempdir, link_path)) + "%s-FAILED" % os.path.basename(self.current_test_case_info.tempdir), + ) + + self.current_test_case_info.logger.debug( + "creating a link to the failed test" + ) + self.current_test_case_info.logger.debug( + "os.symlink(%s, %s)" + % (self.current_test_case_info.tempdir, link_path) + ) if os.path.exists(link_path): - if self.current_test_case_info.logger: - self.current_test_case_info.logger.debug( - 'symlink already exists') + self.current_test_case_info.logger.debug("symlink already exists") else: os.symlink(self.current_test_case_info.tempdir, link_path) except Exception as e: - if self.current_test_case_info.logger: - self.current_test_case_info.logger.error(e) + self.current_test_case_info.logger.error(e) def send_result_through_pipe(self, test, result): - if hasattr(self, 'test_framework_result_pipe'): + if hasattr(self, "test_framework_result_pipe"): pipe = self.test_framework_result_pipe if pipe: pipe.send((test.id(), result)) 
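The result pipe above only forwards (test_id, TestResultCode) tuples; for orientation, a hedged sketch of how a parent process might drain such a pipe follows. The Pipe wiring and handler name are assumptions rather than code from this patch, and TestResultCode is assumed to behave as a standard Enum.

    # Hedged sketch: consuming (test_id, TestResultCode) tuples from a result pipe.
    # The Pipe setup below is illustrative; the real plumbing lives outside this file.
    import multiprocessing

    from test_result_code import TestResultCode

    recv_end, send_end = multiprocessing.Pipe(duplex=False)
    # ... a child process would expose send_end as test_framework_result_pipe ...

    def drain_results(conn):
        while conn.poll():
            test_id, code = conn.recv()
            if code == TestResultCode.TEST_RUN:
                print(f"{test_id}: run finished")
            elif code in (TestResultCode.FAIL, TestResultCode.ERROR):
                print(f"{test_id}: {code.name}")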
- def log_error(self, test, err, fn_name): + def log_result(self, fn, test, err=None, reason=None): if self.current_test_case_info: if isinstance(test, unittest.suite._ErrorHolder): test_name = test.description else: - test_name = '%s.%s(%s)' % (test.__class__.__name__, - test._testMethodName, - test._testMethodDoc) - self.current_test_case_info.logger.debug( - "--- %s() %s called, err is %s" % - (fn_name, test_name, err)) + test_name = "%s.%s(%s)" % ( + test.__class__.__name__, + test._testMethodName, + test._testMethodDoc, + ) + extra_msg = "" + if err: + extra_msg += f", error is {err}" + if reason: + extra_msg += f", reason is {reason}" self.current_test_case_info.logger.debug( - "formatted exception is:\n%s" % - "".join(format_exception(*err))) - - def add_error(self, test, err, unittest_fn, error_type): - if error_type == FAIL: - self.log_error(test, err, 'addFailure') + f"--- {fn}() {test_name} called{extra_msg}" + ) + if err: + self.current_test_case_info.logger.debug( + "formatted exception is:\n%s" % "".join(format_exception(*err)) + ) + + def add_error(self, test, err, unittest_fn, result_code): + self.result_code = result_code + if result_code == TestResultCode.FAIL: + self.log_result("addFailure", test, err=err) error_type_str = colorize("FAIL", RED) - elif error_type == ERROR: - self.log_error(test, err, 'addError') + elif result_code == TestResultCode.ERROR: + self.log_result("addError", test, err=err) error_type_str = colorize("ERROR", RED) else: - raise Exception('Error type %s cannot be used to record an ' - 'error or a failure' % error_type) + raise Exception(f"Unexpected result code {result_code}") unittest_fn(self, test, err) if self.current_test_case_info: - self.result_string = "%s [ temp dir used by test case: %s ]" % \ - (error_type_str, - self.current_test_case_info.tempdir) + self.result_string = "%s [ temp dir used by test case: %s ]" % ( + error_type_str, + self.current_test_case_info.tempdir, + ) self.symlink_failed() self.failed_test_cases_info.add(self.current_test_case_info) if is_core_present(self.current_test_case_info.tempdir): @@ -1339,14 +1788,14 @@ class VppTestResult(unittest.TestResult): test_name = str(test) else: test_name = "'{!s}' ({!s})".format( - get_testcase_doc_name(test), test.id()) + get_testcase_doc_name(test), test.id() + ) self.current_test_case_info.core_crash_test = test_name - self.core_crash_test_cases_info.add( - self.current_test_case_info) + self.core_crash_test_cases_info.add(self.current_test_case_info) else: - self.result_string = '%s [no temp dir]' % error_type_str + self.result_string = "%s [no temp dir]" % error_type_str - self.send_result_through_pipe(test, error_type) + self.send_result_through_pipe(test, result_code) def addFailure(self, test, err): """ @@ -1356,7 +1805,7 @@ class VppTestResult(unittest.TestResult): :param err: error message """ - self.add_error(test, err, unittest.TestResult.addFailure, FAIL) + self.add_error(test, err, unittest.TestResult.addFailure, TestResultCode.FAIL) def addError(self, test, err): """ @@ -1366,7 +1815,7 @@ class VppTestResult(unittest.TestResult): :param err: error message """ - self.add_error(test, err, unittest.TestResult.addError, ERROR) + self.add_error(test, err, unittest.TestResult.addError, TestResultCode.ERROR) def getDescription(self, test): """ @@ -1387,18 +1836,67 @@ class VppTestResult(unittest.TestResult): """ def print_header(test): - if not hasattr(test.__class__, '_header_printed'): - print(double_line_delim) - print(colorize(getdoc(test).splitlines()[0], GREEN)) - 
print(double_line_delim) - test.__class__._header_printed = True + if test.__class__ in self.printed: + return + + test_doc = getdoc(test) + if not test_doc: + raise Exception("No doc string for test '%s'" % test.id()) + + test_title = test_doc.splitlines()[0].rstrip() + test_title = colorize(test_title, GREEN) + if test.is_tagged_run_solo(): + test_title = colorize(f"SOLO RUN: {test_title}", YELLOW) + + # This block may overwrite the colorized title above, + # but we want this to stand out and be fixed + if test.has_tag(TestCaseTag.FIXME_VPP_WORKERS): + test_title = colorize(f"FIXME with VPP workers: {test_title}", RED) + + if test.has_tag(TestCaseTag.FIXME_ASAN): + test_title = colorize(f"FIXME with ASAN: {test_title}", RED) + test.skip_fixme_asan() + + if is_distro_ubuntu2204 == True and test.has_tag( + TestCaseTag.FIXME_UBUNTU2204 + ): + test_title = colorize(f"FIXME on Ubuntu-22.04: {test_title}", RED) + test.skip_fixme_ubuntu2204() + + if is_distro_debian11 == True and test.has_tag(TestCaseTag.FIXME_DEBIAN11): + test_title = colorize(f"FIXME on Debian-11: {test_title}", RED) + test.skip_fixme_debian11() + + if "debug" in config.vpp_tag and test.has_tag(TestCaseTag.FIXME_VPP_DEBUG): + test_title = colorize(f"FIXME on VPP Debug: {test_title}", RED) + test.skip_fixme_vpp_debug() + + if hasattr(test, "vpp_worker_count"): + if test.vpp_worker_count == 0: + test_title += " [main thread only]" + elif test.vpp_worker_count == 1: + test_title += " [1 worker thread]" + else: + test_title += f" [{test.vpp_worker_count} worker threads]" - print_header(test) + if test.__class__.skipped_due_to_cpu_lack: + test_title = colorize( + f"{test_title} [skipped - not enough cpus, " + f"required={test.__class__.get_cpus_required()}, " + f"available={max_vpp_cpus}]", + YELLOW, + ) + print(double_line_delim) + print(test_title) + print(double_line_delim) + self.printed.append(test.__class__) + + print_header(test) + self.start_test = time.time() unittest.TestResult.startTest(self, test) if self.verbosity > 0: - self.stream.writeln( - "Starting " + self.getDescription(test) + " ...") + self.stream.writeln("Starting " + self.getDescription(test) + " ...") self.stream.writeln(single_line_delim) def stopTest(self, test): @@ -1409,16 +1907,41 @@ class VppTestResult(unittest.TestResult): """ unittest.TestResult.stopTest(self, test) + + result_code_to_suffix = { + TestResultCode.PASS: "", + TestResultCode.FAIL: "", + TestResultCode.ERROR: "", + TestResultCode.SKIP: "", + TestResultCode.TEST_RUN: "", + TestResultCode.SKIP_CPU_SHORTAGE: "", + TestResultCode.EXPECTED_FAIL: " [EXPECTED FAIL]", + TestResultCode.UNEXPECTED_PASS: " [UNEXPECTED PASS]", + } + if self.verbosity > 0: self.stream.writeln(single_line_delim) - self.stream.writeln("%-73s%s" % (self.getDescription(test), - self.result_string)) + self.stream.writeln( + "%-72s%s%s" + % ( + self.getDescription(test), + self.result_string, + result_code_to_suffix[self.result_code], + ) + ) self.stream.writeln(single_line_delim) else: - self.stream.writeln("%-73s%s" % (self.getDescription(test), - self.result_string)) + self.stream.writeln( + "%-67s %4.2f %s%s" + % ( + self.getDescription(test), + time.time() - self.start_test, + self.result_string, + result_code_to_suffix[self.result_code], + ) + ) - self.send_result_through_pipe(test, TEST_RUN) + self.send_result_through_pipe(test, TestResultCode.TEST_RUN) def printErrors(self): """ @@ -1426,12 +1949,12 @@ class VppTestResult(unittest.TestResult): """ if len(self.errors) > 0 or len(self.failures) > 0: 
self.stream.writeln() - self.printErrorList('ERROR', self.errors) - self.printErrorList('FAIL', self.failures) + self.printErrorList("ERROR", self.errors) + self.printErrorList("FAIL", self.failures) # ^^ that is the last output from unittest before summary if not self.runner.print_summary: - devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w')) + devnull = unittest.runner._WritelnDecorator(open(os.devnull, "w")) self.stream = devnull self.runner.stream = devnull @@ -1446,8 +1969,7 @@ class VppTestResult(unittest.TestResult): """ for test, err in errors: self.stream.writeln(double_line_delim) - self.stream.writeln("%s: %s" % - (flavour, self.getDescription(test))) + self.stream.writeln("%s: %s" % (flavour, self.getDescription(test))) self.stream.writeln(single_line_delim) self.stream.writeln("%s" % err) @@ -1462,14 +1984,23 @@ class VppTestRunner(unittest.TextTestRunner): """Class maintaining the results of the tests""" return VppTestResult - def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1, - result_pipe=None, failfast=False, buffer=False, - resultclass=None, print_summary=True, **kwargs): + def __init__( + self, + keep_alive_pipe=None, + descriptions=True, + verbosity=1, + result_pipe=None, + failfast=False, + buffer=False, + resultclass=None, + print_summary=True, + **kwargs, + ): # ignore stream setting here, use hard-coded stdout to be in sync # with prints from VppTestCase methods ... - super(VppTestRunner, self).__init__(sys.stdout, descriptions, - verbosity, failfast, buffer, - resultclass, **kwargs) + super(VppTestRunner, self).__init__( + sys.stdout, descriptions, verbosity, failfast, buffer, resultclass, **kwargs + ) KeepAliveReporter.pipe = keep_alive_pipe self.orig_stream = self.stream @@ -1478,10 +2009,7 @@ class VppTestRunner(unittest.TextTestRunner): self.print_summary = print_summary def _makeResult(self): - return self.resultclass(self.stream, - self.descriptions, - self.verbosity, - self) + return self.resultclass(self.stream, self.descriptions, self.verbosity, self) def run(self, test): """ @@ -1500,93 +2028,124 @@ class VppTestRunner(unittest.TextTestRunner): class Worker(Thread): - def __init__(self, args, logger, env=None): + def __init__(self, executable_args, logger, env=None, *args, **kwargs): + super(Worker, self).__init__(*args, **kwargs) self.logger = logger - self.args = args - if hasattr(self, 'testcase') and self.testcase.debug_all: + self.args = executable_args + if hasattr(self, "testcase") and self.testcase.debug_all: if self.testcase.debug_gdbserver: - self.args = ['/usr/bin/gdbserver', 'localhost:{port}' - .format(port=self.testcase.gdbserver_port)] + args - elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'): + self.args = [ + "/usr/bin/gdbserver", + "localhost:{port}".format(port=self.testcase.gdbserver_port), + ] + args + elif self.testcase.debug_gdb and hasattr(self, "wait_for_gdb"): self.args.append(self.wait_for_gdb) - self.app_bin = args[0] + self.app_bin = executable_args[0] self.app_name = os.path.basename(self.app_bin) - if hasattr(self, 'role'): - self.app_name += ' {role}'.format(role=self.role) + if hasattr(self, "role"): + self.app_name += " {role}".format(role=self.role) self.process = None self.result = None env = {} if env is None else env self.env = copy.deepcopy(env) - super(Worker, self).__init__() def wait_for_enter(self): - if not hasattr(self, 'testcase'): + if not hasattr(self, "testcase"): return if self.testcase.debug_all and self.testcase.debug_gdbserver: print() 
print(double_line_delim) - print("Spawned GDB Server for '{app}' with PID: {pid}" - .format(app=self.app_name, pid=self.process.pid)) + print( + "Spawned GDB Server for '{app}' with PID: {pid}".format( + app=self.app_name, pid=self.process.pid + ) + ) elif self.testcase.debug_all and self.testcase.debug_gdb: print() print(double_line_delim) - print("Spawned '{app}' with PID: {pid}" - .format(app=self.app_name, pid=self.process.pid)) + print( + "Spawned '{app}' with PID: {pid}".format( + app=self.app_name, pid=self.process.pid + ) + ) else: return print(single_line_delim) print("You can debug '{app}' using:".format(app=self.app_name)) if self.testcase.debug_gdbserver: - print("sudo gdb " + self.app_bin + - " -ex 'target remote localhost:{port}'" - .format(port=self.testcase.gdbserver_port)) - print("Now is the time to attach gdb by running the above " - "command, set up breakpoints etc., then resume from " - "within gdb by issuing the 'continue' command") + print( + "sudo gdb " + + self.app_bin + + " -ex 'target remote localhost:{port}'".format( + port=self.testcase.gdbserver_port + ) + ) + print( + "Now is the time to attach gdb by running the above " + "command, set up breakpoints etc., then resume from " + "within gdb by issuing the 'continue' command" + ) self.testcase.gdbserver_port += 1 elif self.testcase.debug_gdb: - print("sudo gdb " + self.app_bin + - " -ex 'attach {pid}'".format(pid=self.process.pid)) - print("Now is the time to attach gdb by running the above " - "command and set up breakpoints etc., then resume from" - " within gdb by issuing the 'continue' command") + print( + "sudo gdb " + + self.app_bin + + " -ex 'attach {pid}'".format(pid=self.process.pid) + ) + print( + "Now is the time to attach gdb by running the above " + "command and set up breakpoints etc., then resume from" + " within gdb by issuing the 'continue' command" + ) print(single_line_delim) input("Press ENTER to continue running the testcase...") def run(self): executable = self.args[0] if not os.path.exists(executable) or not os.access( - executable, os.F_OK | os.X_OK): + executable, os.F_OK | os.X_OK + ): # Exit code that means some system file did not exist, # could not be opened, or had some other kind of error. self.result = os.EX_OSFILE raise EnvironmentError( - "executable '%s' is not found or executable." % executable) - self.logger.debug("Running executable: '{app}'" - .format(app=' '.join(self.args))) + "executable '%s' is not found or executable." 
% executable + ) + self.logger.debug( + "Running executable '{app}': '{cmd}'".format( + app=self.app_name, cmd=" ".join(self.args) + ) + ) env = os.environ.copy() env.update(self.env) env["CK_LOG_FILE_NAME"] = "-" self.process = subprocess.Popen( - self.args, shell=False, env=env, preexec_fn=os.setpgrp, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + ["stdbuf", "-o0", "-e0"] + self.args, + shell=False, + env=env, + preexec_fn=os.setpgrp, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) self.wait_for_enter() out, err = self.process.communicate() self.logger.debug("Finished running `{app}'".format(app=self.app_name)) self.logger.info("Return code is `%s'" % self.process.returncode) self.logger.info(single_line_delim) - self.logger.info("Executable `{app}' wrote to stdout:" - .format(app=self.app_name)) + self.logger.info( + "Executable `{app}' wrote to stdout:".format(app=self.app_name) + ) self.logger.info(single_line_delim) - self.logger.info(out.decode('utf-8')) + self.logger.info(out.decode("utf-8")) self.logger.info(single_line_delim) - self.logger.info("Executable `{app}' wrote to stderr:" - .format(app=self.app_name)) + self.logger.info( + "Executable `{app}' wrote to stderr:".format(app=self.app_name) + ) self.logger.info(single_line_delim) - self.logger.info(err.decode('utf-8')) + self.logger.info(err.decode("utf-8")) self.logger.info(single_line_delim) self.result = self.process.returncode -if __name__ == '__main__': +if __name__ == "__main__": pass
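As a closing illustration, a hedged sketch of driving the reworked Worker thread shown above; the binary path, environment variable, and logger name are placeholders, not values from this change, and the import assumes the script runs from the test directory.

    # Illustrative only: run an external binary through Worker and read its result.
    import logging

    from framework import Worker

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("worker-demo")

    w = Worker(["/bin/true"], logger, env={"EXAMPLE_VAR": "1"})
    w.start()   # run() prepends stdbuf -o0 -e0 and captures stdout/stderr
    w.join()
    assert w.result == 0   # the subprocess return code ends up in .result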