X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=test%2Fframework.py;h=dc08ad08d5a52a1cefc087f751f03bb7b64689bf;hb=23d13c071e80ab6bbed4f5d6cf14ef9ccf05384a;hp=e2b4d7bd5665c341b096b4abd95141df21a38b21;hpb=96867baa6ff72320634da3dd665dca47590124ef;p=vpp.git diff --git a/test/framework.py b/test/framework.py index e2b4d7bd566..dc08ad08d5a 100644 --- a/test/framework.py +++ b/test/framework.py @@ -1,36 +1,69 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from __future__ import print_function -import gc +import logging import sys import os import select +import signal +import subprocess import unittest -import tempfile +import re import time -import resource import faulthandler import random +import copy +import platform +import shutil from collections import deque from threading import Thread, Event from inspect import getdoc, isclass from traceback import format_exception from logging import FileHandler, DEBUG, Formatter -from scapy.packet import Raw -from hook import StepHook, PollHook +from enum import Enum +from abc import ABC, abstractmethod +from struct import pack, unpack + +import scapy.compat +from scapy.packet import Raw, Packet +from config import config, available_cpus, num_cpus, max_vpp_cpus +import hook as hookmodule from vpp_pg_interface import VppPGInterface from vpp_sub_interface import VppSubInterface from vpp_lo_interface import VppLoInterface +from vpp_bvi_interface import VppBviInterface from vpp_papi_provider import VppPapiProvider -from log import * +from vpp_papi import VppEnum +import vpp_papi +from vpp_papi.vpp_stats import VPPStats +from vpp_papi.vpp_transport_socket import VppTransportSocketIOError +from log import ( + RED, + GREEN, + YELLOW, + double_line_delim, + single_line_delim, + get_logger, + colorize, +) from vpp_object import VppObjectRegistry -from vpp_punt_socket import vpp_uds_socket_name -if os.name == 'posix' and sys.version_info[0] < 3: - # using subprocess32 is recommended by python official documentation - # @ https://docs.python.org/2/library/subprocess.html - import subprocess32 as subprocess -else: - import subprocess +from util import ppp, is_core_present +from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror +from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest +from scapy.layers.inet6 import ICMPv6EchoReply +from vpp_running import use_running +from test_result_code import TestResultCode + + +logger = logging.getLogger(__name__) + +# Set up an empty logger for the testcase that can be overridden as necessary +null_logger = logging.getLogger("VppTestCase") +null_logger.addHandler(logging.NullHandler()) + + +if config.debug_framework: + import debug_internal """ Test framework module. @@ -40,12 +73,50 @@ else: """ +class VppDiedError(Exception): + """exception for reporting that the subprocess has died.""" + + signals_by_value = { + v: k + for k, v in signal.__dict__.items() + if k.startswith("SIG") and not k.startswith("SIG_") + } + + def __init__(self, rv=None, testcase=None, method_name=None): + self.rv = rv + self.signal_name = None + self.testcase = testcase + self.method_name = method_name + + try: + self.signal_name = VppDiedError.signals_by_value[-rv] + except (KeyError, TypeError): + pass + + if testcase is None and method_name is None: + in_msg = "" + else: + in_msg = " while running %s.%s" % (testcase, method_name) + + if self.rv: + msg = "VPP subprocess died unexpectedly%s with return code: %d%s." 
% ( + in_msg, + self.rv, + " [%s]" % (self.signal_name if self.signal_name is not None else ""), + ) + else: + msg = "VPP subprocess died unexpectedly%s." % in_msg + + super(VppDiedError, self).__init__(msg) + + class _PacketInfo(object): """Private class to create packet info object. Help process information about the next packet. Set variables to default values. """ + #: Store the index of the packet. index = -1 #: Store the index of the source packet generator interface of the packet. @@ -69,18 +140,25 @@ class _PacketInfo(object): def pump_output(testclass): - """ pump output from vpp stdout/stderr to proper queues """ + """pump output from vpp stdout/stderr to proper queues""" + if not hasattr(testclass, "vpp"): + return stdout_fragment = "" stderr_fragment = "" - while not testclass.pump_thread_stop_flag.wait(0): - readable = select.select([testclass.vpp.stdout.fileno(), - testclass.vpp.stderr.fileno(), - testclass.pump_thread_wakeup_pipe[0]], - [], [])[0] + while not testclass.pump_thread_stop_flag.is_set(): + readable = select.select( + [ + testclass.vpp.stdout.fileno(), + testclass.vpp.stderr.fileno(), + testclass.pump_thread_wakeup_pipe[0], + ], + [], + [], + )[0] if testclass.vpp.stdout.fileno() in readable: read = os.read(testclass.vpp.stdout.fileno(), 102400) if len(read) > 0: - split = read.splitlines(True) + split = read.decode("ascii", errors="backslashreplace").splitlines(True) if len(stdout_fragment) > 0: split[0] = "%s%s" % (stdout_fragment, split[0]) if len(split) > 0 and split[-1].endswith("\n"): @@ -89,14 +167,13 @@ def pump_output(testclass): limit = -1 stdout_fragment = split[-1] testclass.vpp_stdout_deque.extend(split[:limit]) - if not testclass.cache_vpp_output: + if not config.cache_vpp_output: for line in split[:limit]: - testclass.logger.debug( - "VPP STDOUT: %s" % line.rstrip("\n")) + testclass.logger.info("VPP STDOUT: %s" % line.rstrip("\n")) if testclass.vpp.stderr.fileno() in readable: read = os.read(testclass.vpp.stderr.fileno(), 102400) if len(read) > 0: - split = read.splitlines(True) + split = read.decode("ascii", errors="backslashreplace").splitlines(True) if len(stderr_fragment) > 0: split[0] = "%s%s" % (stderr_fragment, split[0]) if len(split) > 0 and split[-1].endswith("\n"): @@ -104,41 +181,54 @@ def pump_output(testclass): else: limit = -1 stderr_fragment = split[-1] + testclass.vpp_stderr_deque.extend(split[:limit]) - if not testclass.cache_vpp_output: + if not config.cache_vpp_output: for line in split[:limit]: - testclass.logger.debug( - "VPP STDERR: %s" % line.rstrip("\n")) - # ignoring the dummy pipe here intentionally - the flag will take care - # of properly terminating the loop + testclass.logger.error("VPP STDERR: %s" % line.rstrip("\n")) + # ignoring the dummy pipe here intentionally - the + # flag will take care of properly terminating the loop -def running_extended_tests(): - try: - s = os.getenv("EXTENDED_TESTS") - return True if s.lower() in ("y", "yes", "1") else False - except: - return False +def _is_platform_aarch64(): + return platform.machine() == "aarch64" + + +is_platform_aarch64 = _is_platform_aarch64() + + +def _is_distro_ubuntu2204(): + with open("/etc/os-release") as f: + for line in f.readlines(): + if "jammy" in line: + return True return False -def running_on_centos(): - try: - os_id = os.getenv("OS_ID") - return True if "centos" in os_id.lower() else False - except: - return False +is_distro_ubuntu2204 = _is_distro_ubuntu2204() + + +def _is_distro_debian11(): + with open("/etc/os-release") as f: + for line in 
f.readlines(): + if "bullseye" in line: + return True return False +is_distro_debian11 = _is_distro_debian11() + + class KeepAliveReporter(object): """ Singleton object which reports test start to parent process """ + _shared_state = {} def __init__(self): self.__dict__ = self._shared_state + self._pipe = None @property def pipe(self): @@ -146,33 +236,99 @@ class KeepAliveReporter(object): @pipe.setter def pipe(self, pipe): - if hasattr(self, '_pipe'): + if self._pipe is not None: raise Exception("Internal error - pipe should only be set once.") self._pipe = pipe - def send_keep_alive(self, test): + def send_keep_alive(self, test, desc=None): """ Write current test tmpdir & desc to keep-alive pipe to signal liveness """ - if self.pipe is None: + if not hasattr(test, "vpp") or self.pipe is None: # if not running forked.. return if isclass(test): - desc = test.__name__ + desc = "%s (%s)" % (desc, unittest.util.strclass(test)) else: - desc = test.shortDescription() - if not desc: - desc = str(test) + desc = test.id() + + self.pipe.send((desc, config.vpp, test.tempdir, test.vpp.pid)) - self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid)) +class TestCaseTag(Enum): + # marks the suites that must run at the end + # using only a single test runner + RUN_SOLO = 1 + # marks the suites broken on VPP multi-worker + FIXME_VPP_WORKERS = 2 + # marks the suites broken when ASan is enabled + FIXME_ASAN = 3 + # marks suites broken on Ubuntu-22.04 + FIXME_UBUNTU2204 = 4 + # marks suites broken on Debian-11 + FIXME_DEBIAN11 = 5 + # marks suites broken on debug vpp image + FIXME_VPP_DEBUG = 6 -class VppTestCase(unittest.TestCase): + +def create_tag_decorator(e): + def decorator(cls): + try: + cls.test_tags.append(e) + except AttributeError: + cls.test_tags = [e] + return cls + + return decorator + + +tag_run_solo = create_tag_decorator(TestCaseTag.RUN_SOLO) +tag_fixme_vpp_workers = create_tag_decorator(TestCaseTag.FIXME_VPP_WORKERS) +tag_fixme_asan = create_tag_decorator(TestCaseTag.FIXME_ASAN) +tag_fixme_ubuntu2204 = create_tag_decorator(TestCaseTag.FIXME_UBUNTU2204) +tag_fixme_debian11 = create_tag_decorator(TestCaseTag.FIXME_DEBIAN11) +tag_fixme_vpp_debug = create_tag_decorator(TestCaseTag.FIXME_VPP_DEBUG) + + +class DummyVpp: + returncode = None + pid = 0xCAFEBAFE + + def poll(self): + pass + + def terminate(self): + pass + + +class CPUInterface(ABC): + cpus = [] + skipped_due_to_cpu_lack = False + + @classmethod + @abstractmethod + def get_cpus_required(cls): + pass + + @classmethod + def assign_cpus(cls, cpus): + cls.cpus = cpus + + +@use_running +class VppTestCase(CPUInterface, unittest.TestCase): """This subclass is a base class for VPP test cases that are implemented as classes. It provides methods to create and run test case. 
""" + extra_vpp_statseg_config = "" + extra_vpp_config = [] + extra_vpp_plugin_config = [] + logger = null_logger + vapi_response_timeout = 5 + remove_configured_vpp_objects_on_tear_down = True + @property def packet_infos(self): """List of packet infos""" @@ -186,6 +342,44 @@ class VppTestCase(unittest.TestCase): else: return 0 + @classmethod + def has_tag(cls, tag): + """if the test case has a given tag - return true""" + try: + return tag in cls.test_tags + except AttributeError: + pass + return False + + @classmethod + def is_tagged_run_solo(cls): + """if the test case class is timing-sensitive - return true""" + return cls.has_tag(TestCaseTag.RUN_SOLO) + + @classmethod + def skip_fixme_asan(cls): + """if @tag_fixme_asan & ASan is enabled - mark for skip""" + if cls.has_tag(TestCaseTag.FIXME_ASAN): + vpp_extra_cmake_args = os.environ.get("VPP_EXTRA_CMAKE_ARGS", "") + if "DVPP_ENABLE_SANITIZE_ADDR=ON" in vpp_extra_cmake_args: + cls = unittest.skip("Skipping @tag_fixme_asan tests")(cls) + + @classmethod + def skip_fixme_ubuntu2204(cls): + """if distro is ubuntu 22.04 and @tag_fixme_ubuntu2204 mark for skip""" + if cls.has_tag(TestCaseTag.FIXME_UBUNTU2204): + cls = unittest.skip("Skipping @tag_fixme_ubuntu2204 tests")(cls) + + @classmethod + def skip_fixme_debian11(cls): + """if distro is Debian-11 and @tag_fixme_debian11 mark for skip""" + if cls.has_tag(TestCaseTag.FIXME_DEBIAN11): + cls = unittest.skip("Skipping @tag_fixme_debian11 tests")(cls) + + @classmethod + def skip_fixme_vpp_debug(cls): + cls = unittest.skip("Skipping @tag_fixme_vpp_debug tests")(cls) + @classmethod def instance(cls): """Return the instance of this testcase""" @@ -193,74 +387,157 @@ class VppTestCase(unittest.TestCase): @classmethod def set_debug_flags(cls, d): + cls.gdbserver_port = 7777 cls.debug_core = False cls.debug_gdb = False cls.debug_gdbserver = False + cls.debug_all = False + cls.debug_attach = False if d is None: return dl = d.lower() if dl == "core": cls.debug_core = True - elif dl == "gdb": + elif dl == "gdb" or dl == "gdb-all": cls.debug_gdb = True - elif dl == "gdbserver": + elif dl == "gdbserver" or dl == "gdbserver-all": cls.debug_gdbserver = True + elif dl == "attach": + cls.debug_attach = True else: raise Exception("Unrecognized DEBUG option: '%s'" % d) + if dl == "gdb-all" or dl == "gdbserver-all": + cls.debug_all = True @classmethod - def setUpConstants(cls): - """ Set-up the test case class based on environment variables """ - try: - s = os.getenv("STEP") - cls.step = True if s.lower() in ("y", "yes", "1") else False - except: - cls.step = False - try: - d = os.getenv("DEBUG") - except: - d = None - try: - c = os.getenv("CACHE_OUTPUT", "1") - cls.cache_vpp_output = \ - False if c.lower() in ("n", "no", "0") else True - except: - cls.cache_vpp_output = True - cls.set_debug_flags(d) - cls.vpp_bin = os.getenv('VPP_TEST_BIN', "vpp") - cls.plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH') - cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS') - plugin_path = None - if cls.plugin_path is not None: - if cls.extern_plugin_path is not None: - plugin_path = "%s:%s" % ( - cls.plugin_path, cls.extern_plugin_path) + def get_vpp_worker_count(cls): + if not hasattr(cls, "vpp_worker_count"): + if cls.has_tag(TestCaseTag.FIXME_VPP_WORKERS): + cls.vpp_worker_count = 0 else: - plugin_path = cls.plugin_path - elif cls.extern_plugin_path is not None: - plugin_path = cls.extern_plugin_path + cls.vpp_worker_count = config.vpp_worker_count + return cls.vpp_worker_count + + @classmethod + def get_cpus_required(cls): 
+ return 1 + cls.get_vpp_worker_count() + + @classmethod + def setUpConstants(cls): + """Set-up the test case class based on environment variables""" + cls.step = config.step + cls.plugin_path = ":".join(config.vpp_plugin_dir) + cls.test_plugin_path = ":".join(config.vpp_test_plugin_dir) + cls.extern_plugin_path = ":".join(config.extern_plugin_dir) debug_cli = "" if cls.step or cls.debug_gdb or cls.debug_gdbserver: debug_cli = "cli-listen localhost:5002" - coredump_size = None - try: - size = os.getenv("COREDUMP_SIZE") - if size is not None: - coredump_size = "coredump-size %s" % size - except: - pass - if coredump_size is None: + size = re.search(r"\d+[gG]", config.coredump_size) + if size: + coredump_size = f"coredump-size {config.coredump_size}".lower() + else: coredump_size = "coredump-size unlimited" - cls.vpp_cmdline = [cls.vpp_bin, "unix", - "{", "nodaemon", debug_cli, "full-coredump", - coredump_size, "}", "api-trace", "{", "on", "}", - "api-segment", "{", "prefix", cls.shm_prefix, "}", - "plugins", "{", "plugin", "dpdk_plugin.so", "{", - "disable", "}", "}", - "punt", "{", "socket", cls.punt_socket_path, "}"] - if plugin_path is not None: - cls.vpp_cmdline.extend(["plugin_path", plugin_path]) - cls.logger.info("vpp_cmdline: %s" % cls.vpp_cmdline) + default_variant = config.variant + if default_variant is not None: + default_variant = "default { variant %s 100 }" % default_variant + else: + default_variant = "" + + api_fuzzing = config.api_fuzz + if api_fuzzing is None: + api_fuzzing = "off" + + cls.vpp_cmdline = [ + config.vpp, + "unix", + "{", + "nodaemon", + debug_cli, + "full-coredump", + coredump_size, + "runtime-dir", + cls.tempdir, + "}", + "api-trace", + "{", + "on", + "}", + "api-segment", + "{", + "prefix", + cls.get_api_segment_prefix(), + "}", + "cpu", + "{", + "main-core", + str(cls.cpus[0]), + ] + if cls.extern_plugin_path not in (None, ""): + cls.extra_vpp_plugin_config.append("add-path %s" % cls.extern_plugin_path) + if cls.get_vpp_worker_count(): + cls.vpp_cmdline.extend( + ["corelist-workers", ",".join([str(x) for x in cls.cpus[1:]])] + ) + cls.vpp_cmdline.extend( + [ + "}", + "physmem", + "{", + "max-size", + "32m", + "}", + "statseg", + "{", + "socket-name", + cls.get_stats_sock_path(), + cls.extra_vpp_statseg_config, + "}", + "socksvr", + "{", + "socket-name", + cls.get_api_sock_path(), + "}", + "node { ", + default_variant, + "}", + "api-fuzz {", + api_fuzzing, + "}", + "plugins", + "{", + "plugin", + "dpdk_plugin.so", + "{", + "disable", + "}", + "plugin", + "rdma_plugin.so", + "{", + "disable", + "}", + "plugin", + "lisp_unittest_plugin.so", + "{", + "enable", + "}", + "plugin", + "unittest_plugin.so", + "{", + "enable", + "}", + ] + + cls.extra_vpp_plugin_config + + [ + "}", + ] + ) + + if cls.extra_vpp_config is not None: + cls.vpp_cmdline.extend(cls.extra_vpp_config) + + if not cls.debug_attach: + cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline) + cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline)) @classmethod def wait_for_enter(cls): @@ -274,70 +551,164 @@ class VppTestCase(unittest.TestCase): cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid) return print(single_line_delim) - print("You can debug the VPP using e.g.:") + print("You can debug VPP using:") if cls.debug_gdbserver: - print("gdb " + cls.vpp_bin + " -ex 'target remote localhost:7777'") - print("Now is the time to attach a gdb by running the above " - "command, set up breakpoints etc. 
and then resume VPP from " - "within gdb by issuing the 'continue' command") + print( + f"sudo gdb {config.vpp} " + f"-ex 'target remote localhost:{cls.gdbserver_port}'" + ) + print( + "Now is the time to attach gdb by running the above " + "command, set up breakpoints etc., then resume VPP from " + "within gdb by issuing the 'continue' command" + ) + cls.gdbserver_port += 1 elif cls.debug_gdb: - print("gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid) - print("Now is the time to attach a gdb by running the above " - "command and set up breakpoints etc.") + print(f"sudo gdb {config.vpp} -ex 'attach {cls.vpp.pid}'") + print( + "Now is the time to attach gdb by running the above " + "command and set up breakpoints etc., then resume VPP from" + " within gdb by issuing the 'continue' command" + ) print(single_line_delim) - raw_input("Press ENTER to continue running the testcase...") + input("Press ENTER to continue running the testcase...") + + @classmethod + def attach_vpp(cls): + cls.vpp = DummyVpp() @classmethod def run_vpp(cls): + if ( + is_distro_ubuntu2204 == True and cls.has_tag(TestCaseTag.FIXME_UBUNTU2204) + ) or (is_distro_debian11 == True and cls.has_tag(TestCaseTag.FIXME_DEBIAN11)): + return + cls.logger.debug(f"Assigned cpus: {cls.cpus}") cmdline = cls.vpp_cmdline if cls.debug_gdbserver: - gdbserver = '/usr/bin/gdbserver' - if not os.path.isfile(gdbserver) or \ - not os.access(gdbserver, os.X_OK): - raise Exception("gdbserver binary '%s' does not exist or is " - "not executable" % gdbserver) - - cmdline = [gdbserver, 'localhost:7777'] + cls.vpp_cmdline + gdbserver = "/usr/bin/gdbserver" + if not os.path.isfile(gdbserver) or not os.access(gdbserver, os.X_OK): + raise Exception( + "gdbserver binary '%s' does not exist or is " + "not executable" % gdbserver + ) + + cmdline = [ + gdbserver, + "localhost:{port}".format(port=cls.gdbserver_port), + ] + cls.vpp_cmdline cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline)) try: - cls.vpp = subprocess.Popen(cmdline, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - bufsize=1) + cls.vpp = subprocess.Popen( + cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + except subprocess.CalledProcessError as e: + cls.logger.critical( + "Subprocess returned with non-0 return code: (%s)", e.returncode + ) + raise + except OSError as e: + cls.logger.critical( + "Subprocess returned with OS error: (%s) %s", e.errno, e.strerror + ) + raise except Exception as e: - cls.logger.critical("Couldn't start vpp: %s" % e) + cls.logger.exception("Subprocess returned unexpected from %s:", cmdline) raise cls.wait_for_enter() + @classmethod + def wait_for_coredump(cls): + corefile = cls.tempdir + "/core" + if os.path.isfile(corefile): + cls.logger.error("Waiting for coredump to complete: %s", corefile) + curr_size = os.path.getsize(corefile) + deadline = time.time() + 60 + ok = False + while time.time() < deadline: + cls.sleep(1) + size = curr_size + curr_size = os.path.getsize(corefile) + if size == curr_size: + ok = True + break + if not ok: + cls.logger.error( + "Timed out waiting for coredump to complete: %s", corefile + ) + else: + cls.logger.error("Coredump complete: %s, size %d", corefile, curr_size) + + @classmethod + def get_stats_sock_path(cls): + return "%s/stats.sock" % cls.tempdir + + @classmethod + def get_api_sock_path(cls): + return "%s/api.sock" % cls.tempdir + + @classmethod + def get_api_segment_prefix(cls): + return os.path.basename(cls.tempdir) # Only used for VAPI + + @classmethod + def get_tempdir(cls): + if 
cls.debug_attach: + tmpdir = f"{config.tmp_dir}/unittest-attach-gdb" + else: + tmpdir = f"{config.tmp_dir}/vpp-unittest-{cls.__name__}" + if config.wipe_tmp_dir: + shutil.rmtree(tmpdir, ignore_errors=True) + os.mkdir(tmpdir) + return tmpdir + + @classmethod + def create_file_handler(cls): + if config.log_dir is None: + cls.file_handler = FileHandler(f"{cls.tempdir}/log.txt") + return + + logdir = f"{config.log_dir}/vpp-unittest-{cls.__name__}" + if config.wipe_tmp_dir: + shutil.rmtree(logdir, ignore_errors=True) + os.mkdir(logdir) + cls.file_handler = FileHandler(f"{logdir}/log.txt") + @classmethod def setUpClass(cls): """ Perform class setup before running the testcase Remove shared memory files, start vpp and connect the vpp-api """ - gc.collect() # run garbage collection first - random.seed() - cls.logger = getLogger(cls.__name__) - cls.tempdir = tempfile.mkdtemp( - prefix='vpp-unittest-%s-' % cls.__name__) - cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir) + super(VppTestCase, cls).setUpClass() + cls.logger = get_logger(cls.__name__) + random.seed(config.rnd_seed) + if hasattr(cls, "parallel_handler"): + cls.logger.addHandler(cls.parallel_handler) + cls.logger.propagate = False + cls.set_debug_flags(config.debug) + cls.tempdir = cls.get_tempdir() + cls.create_file_handler() cls.file_handler.setFormatter( - Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s', - datefmt="%H:%M:%S")) + Formatter(fmt="%(asctime)s,%(msecs)03d %(message)s", datefmt="%H:%M:%S") + ) cls.file_handler.setLevel(DEBUG) cls.logger.addHandler(cls.file_handler) - cls.shm_prefix = cls.tempdir.split("/")[-1] - cls.punt_socket_path = '%s/%s' % (cls.tempdir, vpp_uds_socket_name) + cls.logger.debug("--- setUpClass() for %s called ---" % cls.__name__) os.chdir(cls.tempdir) - cls.logger.info("Temporary dir is %s, shm prefix is %s", - cls.tempdir, cls.shm_prefix) + cls.logger.info( + "Temporary dir is %s, api socket is %s", + cls.tempdir, + cls.get_api_sock_path(), + ) + cls.logger.debug("Random seed is %s", config.rnd_seed) cls.setUpConstants() cls.reset_packet_infos() - cls._captures = [] - cls._zombie_captures = [] + cls._pcaps = [] + cls._old_pcaps = [] cls.verbose = 0 cls.vpp_dead = False cls.registry = VppObjectRegistry() @@ -346,80 +717,140 @@ class VppTestCase(unittest.TestCase): # need to catch exceptions here because if we raise, then the cleanup # doesn't get called and we might end with a zombie vpp try: - cls.run_vpp() - cls.reporter.send_keep_alive(cls) + if cls.debug_attach: + cls.attach_vpp() + else: + cls.run_vpp() + if not hasattr(cls, "vpp"): + return + cls.reporter.send_keep_alive(cls, "setUpClass") + VppTestResult.current_test_case_info = TestCaseInfo( + cls.logger, cls.tempdir, cls.vpp.pid, config.vpp + ) cls.vpp_stdout_deque = deque() cls.vpp_stderr_deque = deque() - cls.pump_thread_stop_flag = Event() - cls.pump_thread_wakeup_pipe = os.pipe() - cls.pump_thread = Thread(target=pump_output, args=(cls,)) - cls.pump_thread.daemon = True - cls.pump_thread.start() - cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls) + # Pump thread in a non-debug-attached & not running-vpp + if not cls.debug_attach and not hasattr(cls, "running_vpp"): + cls.pump_thread_stop_flag = Event() + cls.pump_thread_wakeup_pipe = os.pipe() + cls.pump_thread = Thread(target=pump_output, args=(cls,)) + cls.pump_thread.daemon = True + cls.pump_thread.start() + if cls.debug_gdb or cls.debug_gdbserver or cls.debug_attach: + cls.vapi_response_timeout = 0 + cls.vapi = VppPapiProvider(cls.__name__, cls, 
cls.vapi_response_timeout) if cls.step: - hook = StepHook(cls) + hook = hookmodule.StepHook(cls) else: - hook = PollHook(cls) + hook = hookmodule.PollHook(cls) cls.vapi.register_hook(hook) - cls.sleep(0.1, "after vpp startup, before initial poll") + cls.statistics = VPPStats(socketname=cls.get_stats_sock_path()) try: hook.poll_vpp() - except: + except VppDiedError: cls.vpp_startup_failed = True cls.logger.critical( "VPP died shortly after startup, check the" - " output to standard error for possible cause") + " output to standard error for possible cause" + ) raise try: cls.vapi.connect() - except: + except (vpp_papi.VPPIOError, Exception) as e: + cls.logger.debug("Exception connecting to vapi: %s" % e) + cls.vapi.disconnect() + if cls.debug_gdbserver: - print(colorize("You're running VPP inside gdbserver but " - "VPP-API connection failed, did you forget " - "to 'continue' VPP from within gdb?", RED)) - raise - except: - t, v, tb = sys.exc_info() + print( + colorize( + "You're running VPP inside gdbserver but " + "VPP-API connection failed, did you forget " + "to 'continue' VPP from within gdb?", + RED, + ) + ) + raise e + if cls.debug_attach: + last_line = cls.vapi.cli("show thread").split("\n")[-2] + cls.vpp_worker_count = int(last_line.split(" ")[0]) + print("Detected VPP with %s workers." % cls.vpp_worker_count) + except vpp_papi.VPPRuntimeError as e: + cls.logger.debug("%s" % e) + cls.quit() + raise e + except Exception as e: + cls.logger.debug("Exception connecting to VPP: %s" % e) + cls.quit() + raise e + + @classmethod + def _debug_quit(cls): + if cls.debug_gdbserver or cls.debug_gdb: try: - cls.quit() - except: + cls.vpp.poll() + + if cls.vpp.returncode is None: + print() + print(double_line_delim) + print("VPP or GDB server is still running") + print(single_line_delim) + input( + "When done debugging, press ENTER to kill the " + "process and finish running the testcase..." 
+ ) + except AttributeError: pass - raise t, v, tb @classmethod def quit(cls): """ Disconnect vpp-api, kill vpp and cleanup shared memory files """ - if (cls.debug_gdbserver or cls.debug_gdb) and hasattr(cls, 'vpp'): - cls.vpp.poll() - if cls.vpp.returncode is None: - print(double_line_delim) - print("VPP or GDB server is still running") - print(single_line_delim) - raw_input("When done debugging, press ENTER to kill the " - "process and finish running the testcase...") - - os.write(cls.pump_thread_wakeup_pipe[1], 'ding dong wake up') - cls.pump_thread_stop_flag.set() - if hasattr(cls, 'pump_thread'): + cls._debug_quit() + if hasattr(cls, "running_vpp"): + cls.vpp.quit_vpp() + + # first signal that we want to stop the pump thread, then wake it up + if hasattr(cls, "pump_thread_stop_flag"): + cls.pump_thread_stop_flag.set() + if hasattr(cls, "pump_thread_wakeup_pipe"): + os.write(cls.pump_thread_wakeup_pipe[1], b"ding dong wake up") + if hasattr(cls, "pump_thread"): cls.logger.debug("Waiting for pump thread to stop") cls.pump_thread.join() - if hasattr(cls, 'vpp_stderr_reader_thread'): - cls.logger.debug("Waiting for stdderr pump to stop") + if hasattr(cls, "vpp_stderr_reader_thread"): + cls.logger.debug("Waiting for stderr pump to stop") cls.vpp_stderr_reader_thread.join() - if hasattr(cls, 'vpp'): - if hasattr(cls, 'vapi'): + if hasattr(cls, "vpp"): + if hasattr(cls, "vapi"): + cls.logger.debug(cls.vapi.vpp.get_stats()) + cls.logger.debug("Disconnecting class vapi client on %s", cls.__name__) cls.vapi.disconnect() + cls.logger.debug("Deleting class vapi attribute on %s", cls.__name__) del cls.vapi cls.vpp.poll() - if cls.vpp.returncode is None: + if not cls.debug_attach and cls.vpp.returncode is None: + cls.wait_for_coredump() cls.logger.debug("Sending TERM to vpp") cls.vpp.terminate() cls.logger.debug("Waiting for vpp to die") - cls.vpp.communicate() - del cls.vpp + try: + outs, errs = cls.vpp.communicate(timeout=5) + except subprocess.TimeoutExpired: + cls.vpp.kill() + outs, errs = cls.vpp.communicate() + cls.logger.debug("Deleting class vpp attribute on %s", cls.__name__) + if not cls.debug_attach and not hasattr(cls, "running_vpp"): + cls.vpp.stdout.close() + cls.vpp.stderr.close() + # If vpp is a dynamic attribute set by the func use_running, + # deletion will result in an AttributeError that we can + # safetly pass. 
+ try: + del cls.vpp + except AttributeError: + pass if cls.vpp_startup_failed: stdout_log = cls.logger.info @@ -428,74 +859,102 @@ class VppTestCase(unittest.TestCase): stdout_log = cls.logger.info stderr_log = cls.logger.info - if hasattr(cls, 'vpp_stdout_deque'): + if hasattr(cls, "vpp_stdout_deque"): stdout_log(single_line_delim) - stdout_log('VPP output to stdout while running %s:', cls.__name__) + stdout_log("VPP output to stdout while running %s:", cls.__name__) stdout_log(single_line_delim) vpp_output = "".join(cls.vpp_stdout_deque) - with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f: + with open(cls.tempdir + "/vpp_stdout.txt", "w") as f: f.write(vpp_output) - stdout_log('\n%s', vpp_output) + stdout_log("\n%s", vpp_output) stdout_log(single_line_delim) - if hasattr(cls, 'vpp_stderr_deque'): + if hasattr(cls, "vpp_stderr_deque"): stderr_log(single_line_delim) - stderr_log('VPP output to stderr while running %s:', cls.__name__) + stderr_log("VPP output to stderr while running %s:", cls.__name__) stderr_log(single_line_delim) vpp_output = "".join(cls.vpp_stderr_deque) - with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f: + with open(cls.tempdir + "/vpp_stderr.txt", "w") as f: f.write(vpp_output) - stderr_log('\n%s', vpp_output) + stderr_log("\n%s", vpp_output) stderr_log(single_line_delim) @classmethod def tearDownClass(cls): - """ Perform final cleanup after running all tests in this test-case """ + """Perform final cleanup after running all tests in this test-case""" + cls.logger.debug("--- tearDownClass() for %s called ---" % cls.__name__) + if not hasattr(cls, "vpp"): + return + cls.reporter.send_keep_alive(cls, "tearDownClass") cls.quit() cls.file_handler.close() + cls.reset_packet_infos() + if config.debug_framework: + debug_internal.on_tear_down_class(cls) + + def show_commands_at_teardown(self): + """Allow subclass specific teardown logging additions.""" + self.logger.info("--- No test specific show commands provided. 
---") def tearDown(self): - """ Show various debug prints after each test """ - self.logger.debug("--- tearDown() for %s.%s(%s) called ---" % - (self.__class__.__name__, self._testMethodName, - self._testMethodDoc)) - if not self.vpp_dead: - self.logger.debug(self.vapi.cli("show trace")) - self.logger.info(self.vapi.ppcli("show interface")) - self.logger.info(self.vapi.ppcli("show hardware")) - self.logger.info(self.vapi.ppcli("show error")) - self.logger.info(self.vapi.ppcli("show run")) - self.registry.remove_vpp_config(self.logger) + """Show various debug prints after each test""" + self.logger.debug( + "--- tearDown() for %s.%s(%s) called ---" + % (self.__class__.__name__, self._testMethodName, self._testMethodDoc) + ) + if not hasattr(self, "vpp"): + return + + try: + if not self.vpp_dead: + self.logger.debug(self.vapi.cli("show trace max 1000")) + self.logger.info(self.vapi.ppcli("show interface")) + self.logger.info(self.vapi.ppcli("show hardware")) + self.logger.info(self.statistics.set_errors_str()) + self.logger.info(self.vapi.ppcli("show run")) + self.logger.info(self.vapi.ppcli("show log")) + self.logger.info(self.vapi.ppcli("show bihash")) + self.logger.info("Logging testcase specific show commands.") + self.show_commands_at_teardown() + if self.remove_configured_vpp_objects_on_tear_down: + self.registry.remove_vpp_config(self.logger) # Save/Dump VPP api trace log - api_trace = "vpp_api_trace.%s.log" % self._testMethodName + m = self._testMethodName + api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid) tmp_api_trace = "/tmp/%s" % api_trace vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace) self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace)) - self.logger.info("Moving %s to %s\n" % (tmp_api_trace, - vpp_api_trace_log)) - os.rename(tmp_api_trace, vpp_api_trace_log) - self.logger.info(self.vapi.ppcli("api trace dump %s" % - vpp_api_trace_log)) + self.logger.info("Moving %s to %s\n" % (tmp_api_trace, vpp_api_trace_log)) + shutil.move(tmp_api_trace, vpp_api_trace_log) + except VppTransportSocketIOError: + self.logger.debug( + "VppTransportSocketIOError: Vpp dead. Cannot log show commands." 
+ ) + self.vpp_dead = True else: self.registry.unregister_all(self.logger) def setUp(self): - """ Clear trace before running each test""" + """Clear trace before running each test""" + super(VppTestCase, self).setUp() + if not hasattr(self, "vpp"): + return self.reporter.send_keep_alive(self) - self.logger.debug("--- setUp() for %s.%s(%s) called ---" % - (self.__class__.__name__, self._testMethodName, - self._testMethodDoc)) if self.vpp_dead: - raise Exception("VPP is dead when setting up the test") - self.sleep(.1, "during setUp") + raise VppDiedError( + rv=None, + testcase=self.__class__.__name__, + method_name=self._testMethodName, + ) + self.sleep(0.1, "during setUp") self.vpp_stdout_deque.append( - "--- test setUp() for %s.%s(%s) starts here ---\n" % - (self.__class__.__name__, self._testMethodName, - self._testMethodDoc)) + "--- test setUp() for %s.%s(%s) starts here ---\n" + % (self.__class__.__name__, self._testMethodName, self._testMethodDoc) + ) self.vpp_stderr_deque.append( - "--- test setUp() for %s.%s(%s) starts here ---\n" % - (self.__class__.__name__, self._testMethodName, - self._testMethodDoc)) + "--- test setUp() for %s.%s(%s) starts here ---\n" + % (self.__class__.__name__, self._testMethodName, self._testMethodDoc) + ) self.vapi.cli("clear trace") # store the test instance inside the test class - so that objects # holding the class can access instance methods (like assertEqual) @@ -516,37 +975,57 @@ class VppTestCase(unittest.TestCase): i.enable_capture() @classmethod - def register_capture(cls, cap_name): - """ Register a capture in the testclass """ + def register_pcap(cls, intf, worker): + """Register a pcap in the testclass""" # add to the list of captures with current timestamp - cls._captures.append((time.time(), cap_name)) - # filter out from zombies - cls._zombie_captures = [(stamp, name) - for (stamp, name) in cls._zombie_captures - if name != cap_name] + cls._pcaps.append((intf, worker)) + + @classmethod + def get_vpp_time(cls): + # processes e.g. 
"Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT" + # returns float("2.190522") + timestr = cls.vapi.cli("show clock") + head, sep, tail = timestr.partition(",") + head, sep, tail = head.partition("Time now") + return float(tail) + + @classmethod + def sleep_on_vpp_time(cls, sec): + """Sleep according to time in VPP world""" + # On a busy system with many processes + # we might end up with VPP time being slower than real world + # So take that into account when waiting for VPP to do something + start_time = cls.get_vpp_time() + while cls.get_vpp_time() - start_time < sec: + cls.sleep(0.1) @classmethod - def pg_start(cls): - """ Remove any zombie captures and enable the packet generator """ - # how long before capture is allowed to be deleted - otherwise vpp - # crashes - 100ms seems enough (this shouldn't be needed at all) - capture_ttl = 0.1 - now = time.time() - for stamp, cap_name in cls._zombie_captures: - wait = stamp + capture_ttl - now - if wait > 0: - cls.sleep(wait, "before deleting capture %s" % cap_name) - now = time.time() - cls.logger.debug("Removing zombie capture %s" % cap_name) - cls.vapi.cli('packet-generator delete %s' % cap_name) - - cls.vapi.cli("trace add pg-input 50") # 50 is maximum - cls.vapi.cli('packet-generator enable') - cls._zombie_captures = cls._captures - cls._captures = [] + def pg_start(cls, trace=True): + """Enable the PG, wait till it is done, then clean up""" + for intf, worker in cls._old_pcaps: + intf.handle_old_pcap_file(intf.get_in_path(worker), intf.in_history_counter) + cls._old_pcaps = [] + if trace: + cls.vapi.cli("clear trace") + cls.vapi.cli("trace add pg-input 1000") + cls.vapi.cli("packet-generator enable") + # PG, when starts, runs to completion - + # so let's avoid a race condition, + # and wait a little till it's done. + # Then clean it up - and then be gone. + deadline = time.time() + 300 + while cls.vapi.cli("show packet-generator").find("Yes") != -1: + cls.sleep(0.01) # yield + if time.time() > deadline: + cls.logger.error("Timeout waiting for pg to stop") + break + for intf, worker in cls._pcaps: + cls.vapi.cli("packet-generator delete %s" % intf.get_cap_name(worker)) + cls._old_pcaps = cls._pcaps + cls._pcaps = [] @classmethod - def create_pg_interfaces(cls, interfaces): + def create_pg_interfaces_internal(cls, interfaces, gso=0, gso_size=0, mode=None): """ Create packet-generator interfaces. 
@@ -556,30 +1035,88 @@ class VppTestCase(unittest.TestCase): """ result = [] for i in interfaces: - intf = VppPGInterface(cls, i) + intf = VppPGInterface(cls, i, gso, gso_size, mode) setattr(cls, intf.name, intf) result.append(intf) cls.pg_interfaces = result return result @classmethod - def create_loopback_interfaces(cls, interfaces): + def create_pg_ip4_interfaces(cls, interfaces, gso=0, gso_size=0): + if not hasattr(cls, "vpp"): + cls.pg_interfaces = [] + return cls.pg_interfaces + pgmode = VppEnum.vl_api_pg_interface_mode_t + return cls.create_pg_interfaces_internal( + interfaces, gso, gso_size, pgmode.PG_API_MODE_IP4 + ) + + @classmethod + def create_pg_ip6_interfaces(cls, interfaces, gso=0, gso_size=0): + if not hasattr(cls, "vpp"): + cls.pg_interfaces = [] + return cls.pg_interfaces + pgmode = VppEnum.vl_api_pg_interface_mode_t + return cls.create_pg_interfaces_internal( + interfaces, gso, gso_size, pgmode.PG_API_MODE_IP6 + ) + + @classmethod + def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0): + if not hasattr(cls, "vpp"): + cls.pg_interfaces = [] + return cls.pg_interfaces + pgmode = VppEnum.vl_api_pg_interface_mode_t + return cls.create_pg_interfaces_internal( + interfaces, gso, gso_size, pgmode.PG_API_MODE_ETHERNET + ) + + @classmethod + def create_pg_ethernet_interfaces(cls, interfaces, gso=0, gso_size=0): + if not hasattr(cls, "vpp"): + cls.pg_interfaces = [] + return cls.pg_interfaces + pgmode = VppEnum.vl_api_pg_interface_mode_t + return cls.create_pg_interfaces_internal( + interfaces, gso, gso_size, pgmode.PG_API_MODE_ETHERNET + ) + + @classmethod + def create_loopback_interfaces(cls, count): """ Create loopback interfaces. - :param interfaces: iterable indexes of the interfaces. + :param count: number of interfaces created. :returns: List of created interfaces. """ - result = [] - for i in interfaces: - intf = VppLoInterface(cls, i) + if not hasattr(cls, "vpp"): + cls.lo_interfaces = [] + return cls.lo_interfaces + result = [VppLoInterface(cls) for i in range(count)] + for intf in result: setattr(cls, intf.name, intf) - result.append(intf) cls.lo_interfaces = result return result + @classmethod + def create_bvi_interfaces(cls, count): + """ + Create BVI interfaces. + + :param count: number of interfaces created. + :returns: List of created interfaces. + """ + if not hasattr(cls, "vpp"): + cls.bvi_interfaces = [] + return cls.bvi_interfaces + result = [VppBviInterface(cls) for i in range(count)] + for intf in result: + setattr(cls, intf.name, intf) + cls.bvi_interfaces = result + return result + @staticmethod - def extend_packet(packet, size, padding=' '): + def extend_packet(packet, size, padding=" "): """ Extend packet to given size by padding with spaces or custom padding NOTE: Currently works only when Raw layer is present. 
@@ -592,12 +1129,12 @@ class VppTestCase(unittest.TestCase): packet_len = len(packet) + 4 extend = size - packet_len if extend > 0: - num = (extend / len(padding)) + 1 - packet[Raw].load += (padding * num)[:extend] + num = (extend // len(padding)) + 1 + packet[Raw].load += (padding * num)[:extend].encode("ascii") @classmethod def reset_packet_infos(cls): - """ Reset the list of packet info objects and packet counts to zero """ + """Reset the list of packet info objects and packet counts to zero""" cls._packet_infos = {} cls._packet_count_for_dst_if_idx = {} @@ -637,26 +1174,34 @@ class VppTestCase(unittest.TestCase): :returns: string containing serialized data from packet info """ - return "%d %d %d %d %d" % (info.index, info.src, info.dst, - info.ip, info.proto) + + # retrieve payload, currently 18 bytes (4 x ints + 1 short) + return pack("iiiih", info.index, info.src, info.dst, info.ip, info.proto) @staticmethod - def payload_to_info(payload): + def payload_to_info(payload, payload_field="load"): """ Convert packet payload to _PacketInfo object :param payload: packet payload - + :type payload: + :param payload_field: packet fieldname of payload "load" for + + :type payload_field: str :returns: _PacketInfo object containing de-serialized data from payload """ - numbers = payload.split() + + # retrieve payload, currently 18 bytes (4 x ints + 1 short) + payload_b = getattr(payload, payload_field)[:18] + info = _PacketInfo() - info.index = int(numbers[0]) - info.src = int(numbers[1]) - info.dst = int(numbers[2]) - info.ip = int(numbers[3]) - info.proto = int(numbers[4]) + info.index, info.src, info.dst, info.ip, info.proto = unpack("iiiih", payload_b) + + # some SRv6 TCs depend on get an exception if bad values are detected + if info.index > 0x4000: + raise ValueError("Index value is invalid") + return info def get_next_packet_info(self, info): @@ -718,77 +1263,373 @@ class VppTestCase(unittest.TestCase): return try: msg = "Invalid %s: %d('%s') does not match expected value %d('%s')" - msg = msg % (getdoc(name_or_class).strip(), - real_value, str(name_or_class(real_value)), - expected_value, str(name_or_class(expected_value))) - except: + msg = msg % ( + getdoc(name_or_class).strip(), + real_value, + str(name_or_class(real_value)), + expected_value, + str(name_or_class(expected_value)), + ) + except Exception: msg = "Invalid %s: %s does not match expected value %s" % ( - name_or_class, real_value, expected_value) + name_or_class, + real_value, + expected_value, + ) self.assertEqual(real_value, expected_value, msg) - def assert_in_range(self, - real_value, - expected_min, - expected_max, - name=None): + def assert_in_range(self, real_value, expected_min, expected_max, name=None): if name is None: msg = None else: msg = "Invalid %s: %s out of range <%s,%s>" % ( - name, real_value, expected_min, expected_max) + name, + real_value, + expected_min, + expected_max, + ) self.assertTrue(expected_min <= real_value <= expected_max, msg) + def assert_packet_checksums_valid(self, packet, ignore_zero_udp_checksums=True): + received = packet.__class__(scapy.compat.raw(packet)) + udp_layers = ["UDP", "UDPerror"] + checksum_fields = ["cksum", "chksum"] + checksums = [] + counter = 0 + temp = received.__class__(scapy.compat.raw(received)) + while True: + layer = temp.getlayer(counter) + if layer: + layer = layer.copy() + layer.remove_payload() + for cf in checksum_fields: + if hasattr(layer, cf): + if ( + ignore_zero_udp_checksums + and 0 == getattr(layer, cf) + and layer.name in udp_layers + ): + continue + 
delattr(temp.getlayer(counter), cf) + checksums.append((counter, cf)) + else: + break + counter = counter + 1 + if 0 == len(checksums): + return + temp = temp.__class__(scapy.compat.raw(temp)) + for layer, cf in reversed(checksums): + calc_sum = getattr(temp[layer], cf) + self.assert_equal( + getattr(received[layer], cf), + calc_sum, + "packet checksum on layer #%d: %s" % (layer, temp[layer].name), + ) + self.logger.debug( + "Checksum field `%s` on `%s` layer has correct value `%s`" + % (cf, temp[layer].name, calc_sum) + ) + + def assert_checksum_valid( + self, + received_packet, + layer, + checksum_field_names=["chksum", "cksum"], + ignore_zero_checksum=False, + ): + """Check checksum of received packet on given layer""" + layer_copy = received_packet[layer].copy() + layer_copy.remove_payload() + field_name = None + for f in checksum_field_names: + if hasattr(layer_copy, f): + field_name = f + break + if field_name is None: + raise Exception( + f"Layer `{layer}` has none of checksum fields: `{checksum_field_names}`." + ) + received_packet_checksum = getattr(received_packet[layer], field_name) + if ignore_zero_checksum and 0 == received_packet_checksum: + return + recalculated = received_packet.__class__(scapy.compat.raw(received_packet)) + delattr(recalculated[layer], field_name) + recalculated = recalculated.__class__(scapy.compat.raw(recalculated)) + self.assert_equal( + received_packet_checksum, + getattr(recalculated[layer], field_name), + f"packet checksum (field: {field_name}) on layer: %s" % layer, + ) + + def assert_ip_checksum_valid(self, received_packet, ignore_zero_checksum=False): + self.assert_checksum_valid( + received_packet, "IP", ignore_zero_checksum=ignore_zero_checksum + ) + + def assert_tcp_checksum_valid(self, received_packet, ignore_zero_checksum=False): + self.assert_checksum_valid( + received_packet, "TCP", ignore_zero_checksum=ignore_zero_checksum + ) + + def assert_udp_checksum_valid(self, received_packet, ignore_zero_checksum=True): + self.assert_checksum_valid( + received_packet, "UDP", ignore_zero_checksum=ignore_zero_checksum + ) + + def assert_embedded_icmp_checksum_valid(self, received_packet): + if received_packet.haslayer(IPerror): + self.assert_checksum_valid(received_packet, "IPerror") + if received_packet.haslayer(TCPerror): + self.assert_checksum_valid(received_packet, "TCPerror") + if received_packet.haslayer(UDPerror): + self.assert_checksum_valid( + received_packet, "UDPerror", ignore_zero_checksum=True + ) + if received_packet.haslayer(ICMPerror): + self.assert_checksum_valid(received_packet, "ICMPerror") + + def assert_icmp_checksum_valid(self, received_packet): + self.assert_checksum_valid(received_packet, "ICMP") + self.assert_embedded_icmp_checksum_valid(received_packet) + + def assert_icmpv6_checksum_valid(self, pkt): + if pkt.haslayer(ICMPv6DestUnreach): + self.assert_checksum_valid(pkt, "ICMPv6DestUnreach") + self.assert_embedded_icmp_checksum_valid(pkt) + if pkt.haslayer(ICMPv6EchoRequest): + self.assert_checksum_valid(pkt, "ICMPv6EchoRequest") + if pkt.haslayer(ICMPv6EchoReply): + self.assert_checksum_valid(pkt, "ICMPv6EchoReply") + + def get_counter(self, counter): + if counter.startswith("/"): + counter_value = self.statistics.get_counter(counter) + else: + counters = self.vapi.cli("sh errors").split("\n") + counter_value = 0 + for i in range(1, len(counters) - 1): + results = counters[i].split() + if results[1] == counter: + counter_value = int(results[0]) + break + return counter_value + + def assert_counter_equal(self, counter, 
expected_value, thread=None, index=0): + c = self.get_counter(counter) + if thread is not None: + c = c[thread][index] + else: + c = sum(x[index] for x in c) + self.logger.debug( + "validate counter `%s[%s]', expected: %s, real value: %s" + % (counter, index, expected_value, c) + ) + self.assert_equal(c, expected_value, "counter `%s[%s]'" % (counter, index)) + + def assert_packet_counter_equal(self, counter, expected_value): + counter_value = self.get_counter(counter) + self.assert_equal( + counter_value, expected_value, "packet counter `%s'" % counter + ) + + def assert_error_counter_equal(self, counter, expected_value): + counter_value = self.statistics[counter].sum() + self.assert_equal(counter_value, expected_value, "error counter `%s'" % counter) + @classmethod def sleep(cls, timeout, remark=None): - if hasattr(cls, 'logger'): - cls.logger.debug("Starting sleep for %ss (%s)" % (timeout, remark)) + # /* Allow sleep(0) to maintain win32 semantics, and as decreed + # * by Guido, only the main thread can be interrupted. + # */ + # https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892 # noqa + if timeout == 0: + # yield quantum + if hasattr(os, "sched_yield"): + os.sched_yield() + else: + time.sleep(0) + return + + cls.logger.debug("Starting sleep for %es (%s)", timeout, remark) before = time.time() time.sleep(timeout) after = time.time() if after - before > 2 * timeout: - cls.logger.error("unexpected time.sleep() result - " - "slept for %ss instead of ~%ss!" % ( - after - before, timeout)) - if hasattr(cls, 'logger'): - cls.logger.debug( - "Finished sleep (%s) - slept %ss (wanted %ss)" % ( - remark, after - before, timeout)) - - def send_and_assert_no_replies(self, intf, pkts, remark=""): - self.vapi.cli("clear trace") - intf.add_stream(pkts) + cls.logger.error( + "unexpected self.sleep() result - slept for %es instead of ~%es!", + after - before, + timeout, + ) + + cls.logger.debug( + "Finished sleep (%s) - slept %es (wanted %es)", + remark, + after - before, + timeout, + ) + + def virtual_sleep(self, timeout, remark=None): + self.logger.debug("Moving VPP time by %s (%s)", timeout, remark) + self.vapi.cli("set clock adjust %s" % timeout) + + def pg_send(self, intf, pkts, worker=None, trace=True): + intf.add_stream(pkts, worker=worker) self.pg_enable_capture(self.pg_interfaces) - self.pg_start() - timeout = 1 - for i in self.pg_interfaces: - i.get_capture(0, timeout=timeout) - i.assert_nothing_captured(remark=remark) - timeout = 0.1 + self.pg_start(trace=trace) + + def snapshot_stats(self, stats_diff): + """Return snapshot of interesting stats based on diff dictionary.""" + stats_snapshot = {} + for sw_if_index in stats_diff: + for counter in stats_diff[sw_if_index]: + stats_snapshot[counter] = self.statistics[counter] + self.logger.debug(f"Took statistics stats_snapshot: {stats_snapshot}") + return stats_snapshot + + def compare_stats_with_snapshot(self, stats_diff, stats_snapshot): + """Assert appropriate difference between current stats and snapshot.""" + for sw_if_index in stats_diff: + for cntr, diff in stats_diff[sw_if_index].items(): + if sw_if_index == "err": + self.assert_equal( + self.statistics[cntr].sum(), + stats_snapshot[cntr].sum() + diff, + f"'{cntr}' counter value (previous value: " + f"{stats_snapshot[cntr].sum()}, " + f"expected diff: {diff})", + ) + else: + try: + self.assert_equal( + self.statistics[cntr][:, sw_if_index].sum(), + stats_snapshot[cntr][:, sw_if_index].sum() + diff, + f"'{cntr}' counter value (previous 
value: " + f"{stats_snapshot[cntr][:, sw_if_index].sum()}, " + f"expected diff: {diff})", + ) + except IndexError as e: + # if diff is 0, then this most probably a case where + # test declares multiple interfaces but traffic hasn't + # passed through this one yet - which means the counter + # value is 0 and can be ignored + if 0 != diff: + raise Exception( + f"Couldn't sum counter: {cntr} on sw_if_index: {sw_if_index}" + ) from e + + def send_and_assert_no_replies( + self, intf, pkts, remark="", timeout=None, stats_diff=None, trace=True, msg=None + ): + if stats_diff: + stats_snapshot = self.snapshot_stats(stats_diff) + + self.pg_send(intf, pkts) - def send_and_expect(self, input, pkts, output): - self.vapi.cli("clear trace") - input.add_stream(pkts) - self.pg_enable_capture(self.pg_interfaces) - self.pg_start() + try: + if not timeout: + timeout = 1 + for i in self.pg_interfaces: + i.assert_nothing_captured(timeout=timeout, remark=remark) + timeout = 0.1 + finally: + if trace: + if msg: + self.logger.debug(f"send_and_assert_no_replies: {msg}") + self.logger.debug(self.vapi.cli("show trace")) + + if stats_diff: + self.compare_stats_with_snapshot(stats_diff, stats_snapshot) + + def send_and_expect( + self, + intf, + pkts, + output, + n_rx=None, + worker=None, + trace=True, + msg=None, + stats_diff=None, + ): + if stats_diff: + stats_snapshot = self.snapshot_stats(stats_diff) + + if not n_rx: + n_rx = 1 if isinstance(pkts, Packet) else len(pkts) + self.pg_send(intf, pkts, worker=worker, trace=trace) + rx = output.get_capture(n_rx) + if trace: + if msg: + self.logger.debug(f"send_and_expect: {msg}") + self.logger.debug(self.vapi.cli("show trace")) + + if stats_diff: + self.compare_stats_with_snapshot(stats_diff, stats_snapshot) + + return rx + + def send_and_expect_load_balancing( + self, input, pkts, outputs, worker=None, trace=True + ): + self.pg_send(input, pkts, worker=worker, trace=trace) + rxs = [] + for oo in outputs: + rx = oo._get_capture(1) + self.assertNotEqual(0, len(rx)) + rxs.append(rx) + if trace: + self.logger.debug(self.vapi.cli("show trace")) + return rxs + + def send_and_expect_some(self, intf, pkts, output, worker=None, trace=True): + self.pg_send(intf, pkts, worker=worker, trace=trace) + rx = output._get_capture(1) + if trace: + self.logger.debug(self.vapi.cli("show trace")) + self.assertTrue(len(rx) > 0) + self.assertTrue(len(rx) < len(pkts)) + return rx + + def send_and_expect_only(self, intf, pkts, output, timeout=None, stats_diff=None): + if stats_diff: + stats_snapshot = self.snapshot_stats(stats_diff) + + self.pg_send(intf, pkts) rx = output.get_capture(len(pkts)) + outputs = [output] + if not timeout: + timeout = 1 + for i in self.pg_interfaces: + if i not in outputs: + i.assert_nothing_captured(timeout=timeout) + timeout = 0.1 + + if stats_diff: + self.compare_stats_with_snapshot(stats_diff, stats_snapshot) + return rx -class TestCasePrinter(object): - _shared_state = {} +def get_testcase_doc_name(test): + return getdoc(test.__class__).splitlines()[0] - def __init__(self): - self.__dict__ = self._shared_state - if not hasattr(self, "_test_case_set"): - self._test_case_set = set() - def print_test_case_heading_if_first_time(self, case): - if case.__class__ not in self._test_case_set: - print(double_line_delim) - print(colorize(getdoc(case.__class__).splitlines()[0], YELLOW)) - print(double_line_delim) - self._test_case_set.add(case.__class__) +def get_test_description(descriptions, test): + short_description = test.shortDescription() + if descriptions and 
short_description: + return short_description + else: + return str(test) + + +class TestCaseInfo(object): + def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path): + self.logger = logger + self.tempdir = tempdir + self.vpp_pid = vpp_pid + self.vpp_bin_path = vpp_bin_path + self.core_crash_test = None class VppTestResult(unittest.TestResult): @@ -806,7 +1647,11 @@ class VppTestResult(unittest.TestResult): methods. """ - def __init__(self, stream, descriptions, verbosity): + failed_test_cases_info = set() + core_crash_test_cases_info = set() + current_test_case_info = None + + def __init__(self, stream=None, descriptions=None, verbosity=None, runner=None): """ :param stream File descriptor to store where to report test results. Set to the standard error stream by default. @@ -814,12 +1659,14 @@ class VppTestResult(unittest.TestResult): test case descriptions. :param verbosity Integer variable to store required verbosity level. """ - unittest.TestResult.__init__(self, stream, descriptions, verbosity) + super(VppTestResult, self).__init__(stream, descriptions, verbosity) self.stream = stream self.descriptions = descriptions self.verbosity = verbosity + self.result_code = TestResultCode.TEST_RUN self.result_string = None - self.printer = TestCasePrinter() + self.runner = runner + self.printed = [] def addSuccess(self, test): """ @@ -828,13 +1675,25 @@ class VppTestResult(unittest.TestResult): :param test: """ - if hasattr(test, 'logger'): - test.logger.debug("--- addSuccess() %s.%s(%s) called" - % (test.__class__.__name__, - test._testMethodName, - test._testMethodDoc)) + self.log_result("addSuccess", test) unittest.TestResult.addSuccess(self, test) self.result_string = colorize("OK", GREEN) + self.result_code = TestResultCode.PASS + self.send_result_through_pipe(test, self.result_code) + + def addExpectedFailure(self, test, err): + self.log_result("addExpectedFailure", test, err) + super().addExpectedFailure(test, err) + self.result_string = colorize("FAIL", GREEN) + self.result_code = TestResultCode.EXPECTED_FAIL + self.send_result_through_pipe(test, self.result_code) + + def addUnexpectedSuccess(self, test): + self.log_result("addUnexpectedSuccess", test) + super().addUnexpectedSuccess(test) + self.result_string = colorize("OK", RED) + self.result_code = TestResultCode.UNEXPECTED_PASS + self.send_result_through_pipe(test, self.result_code) def addSkip(self, test, reason): """ @@ -844,38 +1703,102 @@ class VppTestResult(unittest.TestResult): :param reason: """ - if hasattr(test, 'logger'): - test.logger.debug("--- addSkip() %s.%s(%s) called, reason is %s" - % (test.__class__.__name__, - test._testMethodName, - test._testMethodDoc, - reason)) + self.log_result("addSkip", test, reason=reason) unittest.TestResult.addSkip(self, test, reason) self.result_string = colorize("SKIP", YELLOW) - def symlink_failed(self, test): - logger = None - if hasattr(test, 'logger'): - logger = test.logger - if hasattr(test, 'tempdir'): + if reason == "not enough cpus": + self.result_code = TestResultCode.SKIP_CPU_SHORTAGE + else: + self.result_code = TestResultCode.SKIP + self.send_result_through_pipe(test, self.result_code) + + def symlink_failed(self): + if self.current_test_case_info: try: - failed_dir = os.getenv('VPP_TEST_FAILED_DIR') - link_path = '%s/%s-FAILED' % (failed_dir, - test.tempdir.split("/")[-1]) - if logger: - logger.debug("creating a link to the failed test") - logger.debug("os.symlink(%s, %s)" % - (test.tempdir, link_path)) - os.symlink(test.tempdir, link_path) + failed_dir = 
config.failed_dir + link_path = os.path.join( + failed_dir, + "%s-FAILED" % os.path.basename(self.current_test_case_info.tempdir), + ) + + self.current_test_case_info.logger.debug( + "creating a link to the failed test" + ) + self.current_test_case_info.logger.debug( + "os.symlink(%s, %s)" + % (self.current_test_case_info.tempdir, link_path) + ) + if os.path.exists(link_path): + self.current_test_case_info.logger.debug("symlink already exists") + else: + os.symlink(self.current_test_case_info.tempdir, link_path) + except Exception as e: - if logger: - logger.error(e) + self.current_test_case_info.logger.error(e) - def send_failure_through_pipe(self, test): - if hasattr(self, 'test_framework_failed_pipe'): - pipe = self.test_framework_failed_pipe + def send_result_through_pipe(self, test, result): + if hasattr(self, "test_framework_result_pipe"): + pipe = self.test_framework_result_pipe if pipe: - pipe.send(test.__class__) + pipe.send((test.id(), result)) + + def log_result(self, fn, test, err=None, reason=None): + if self.current_test_case_info: + if isinstance(test, unittest.suite._ErrorHolder): + test_name = test.description + else: + test_name = "%s.%s(%s)" % ( + test.__class__.__name__, + test._testMethodName, + test._testMethodDoc, + ) + extra_msg = "" + if err: + extra_msg += f", error is {err}" + if reason: + extra_msg += f", reason is {reason}" + self.current_test_case_info.logger.debug( + f"--- {fn}() {test_name} called{extra_msg}" + ) + if err: + self.current_test_case_info.logger.debug( + "formatted exception is:\n%s" % "".join(format_exception(*err)) + ) + + def add_error(self, test, err, unittest_fn, result_code): + self.result_code = result_code + if result_code == TestResultCode.FAIL: + self.log_result("addFailure", test, err=err) + error_type_str = colorize("FAIL", RED) + elif result_code == TestResultCode.ERROR: + self.log_result("addError", test, err=err) + error_type_str = colorize("ERROR", RED) + else: + raise Exception(f"Unexpected result code {result_code}") + + unittest_fn(self, test, err) + if self.current_test_case_info: + self.result_string = "%s [ temp dir used by test case: %s ]" % ( + error_type_str, + self.current_test_case_info.tempdir, + ) + self.symlink_failed() + self.failed_test_cases_info.add(self.current_test_case_info) + if is_core_present(self.current_test_case_info.tempdir): + if not self.current_test_case_info.core_crash_test: + if isinstance(test, unittest.suite._ErrorHolder): + test_name = str(test) + else: + test_name = "'{!s}' ({!s})".format( + get_testcase_doc_name(test), test.id() + ) + self.current_test_case_info.core_crash_test = test_name + self.core_crash_test_cases_info.add(self.current_test_case_info) + else: + self.result_string = "%s [no temp dir]" % error_type_str + + self.send_result_through_pipe(test, result_code) def addFailure(self, test, err): """ @@ -885,22 +1808,7 @@ class VppTestResult(unittest.TestResult): :param err: error message """ - if hasattr(test, 'logger'): - test.logger.debug("--- addFailure() %s.%s(%s) called, err is %s" - % (test.__class__.__name__, - test._testMethodName, - test._testMethodDoc, err)) - test.logger.debug("formatted exception is:\n%s" % - "".join(format_exception(*err))) - unittest.TestResult.addFailure(self, test, err) - if hasattr(test, 'tempdir'): - self.result_string = colorize("FAIL", RED) + \ - ' [ temp dir used by test case: ' + test.tempdir + ' ]' - self.symlink_failed(test) - else: - self.result_string = colorize("FAIL", RED) + ' [no temp dir]' - - self.send_failure_through_pipe(test) + 
self.add_error(test, err, unittest.TestResult.addFailure, TestResultCode.FAIL) def addError(self, test, err): """ @@ -910,22 +1818,7 @@ class VppTestResult(unittest.TestResult): :param err: error message """ - if hasattr(test, 'logger'): - test.logger.debug("--- addError() %s.%s(%s) called, err is %s" - % (test.__class__.__name__, - test._testMethodName, - test._testMethodDoc, err)) - test.logger.debug("formatted exception is:\n%s" % - "".join(format_exception(*err))) - unittest.TestResult.addError(self, test, err) - if hasattr(test, 'tempdir'): - self.result_string = colorize("ERROR", RED) + \ - ' [ temp dir used by test case: ' + test.tempdir + ' ]' - self.symlink_failed(test) - else: - self.result_string = colorize("ERROR", RED) + ' [no temp dir]' - - self.send_failure_through_pipe(test) + self.add_error(test, err, unittest.TestResult.addError, TestResultCode.ERROR) def getDescription(self, test): """ @@ -935,12 +1828,7 @@ class VppTestResult(unittest.TestResult): :returns: test description """ - # TODO: if none print warning not raise exception - short_description = test.shortDescription() - if self.descriptions and short_description: - return short_description - else: - return str(test) + return get_test_description(self.descriptions, test) def startTest(self, test): """ @@ -949,37 +1837,129 @@ class VppTestResult(unittest.TestResult): :param test: """ - self.printer.print_test_case_heading_if_first_time(test) + + def print_header(test): + if test.__class__ in self.printed: + return + + test_doc = getdoc(test) + if not test_doc: + raise Exception("No doc string for test '%s'" % test.id()) + + test_title = test_doc.splitlines()[0].rstrip() + test_title = colorize(test_title, GREEN) + if test.is_tagged_run_solo(): + test_title = colorize(f"SOLO RUN: {test_title}", YELLOW) + + # This block may overwrite the colorized title above, + # but we want this to stand out and be fixed + if test.has_tag(TestCaseTag.FIXME_VPP_WORKERS): + test_title = colorize(f"FIXME with VPP workers: {test_title}", RED) + + if test.has_tag(TestCaseTag.FIXME_ASAN): + test_title = colorize(f"FIXME with ASAN: {test_title}", RED) + test.skip_fixme_asan() + + if is_distro_ubuntu2204 == True and test.has_tag( + TestCaseTag.FIXME_UBUNTU2204 + ): + test_title = colorize(f"FIXME on Ubuntu-22.04: {test_title}", RED) + test.skip_fixme_ubuntu2204() + + if is_distro_debian11 == True and test.has_tag(TestCaseTag.FIXME_DEBIAN11): + test_title = colorize(f"FIXME on Debian-11: {test_title}", RED) + test.skip_fixme_debian11() + + if "debug" in config.vpp_tag and test.has_tag(TestCaseTag.FIXME_VPP_DEBUG): + test_title = colorize(f"FIXME on VPP Debug: {test_title}", RED) + test.skip_fixme_vpp_debug() + + if hasattr(test, "vpp_worker_count"): + if test.vpp_worker_count == 0: + test_title += " [main thread only]" + elif test.vpp_worker_count == 1: + test_title += " [1 worker thread]" + else: + test_title += f" [{test.vpp_worker_count} worker threads]" + + if test.__class__.skipped_due_to_cpu_lack: + test_title = colorize( + f"{test_title} [skipped - not enough cpus, " + f"required={test.__class__.get_cpus_required()}, " + f"available={max_vpp_cpus}]", + YELLOW, + ) + + print(double_line_delim) + print(test_title) + print(double_line_delim) + self.printed.append(test.__class__) + + print_header(test) + self.start_test = time.time() unittest.TestResult.startTest(self, test) if self.verbosity > 0: - self.stream.writeln( - "Starting " + self.getDescription(test) + " ...") + self.stream.writeln("Starting " + self.getDescription(test) + " 
...") self.stream.writeln(single_line_delim) def stopTest(self, test): """ - Stop a test + Called when the given test has been run :param test: """ unittest.TestResult.stopTest(self, test) + + result_code_to_suffix = { + TestResultCode.PASS: "", + TestResultCode.FAIL: "", + TestResultCode.ERROR: "", + TestResultCode.SKIP: "", + TestResultCode.TEST_RUN: "", + TestResultCode.SKIP_CPU_SHORTAGE: "", + TestResultCode.EXPECTED_FAIL: " [EXPECTED FAIL]", + TestResultCode.UNEXPECTED_PASS: " [UNEXPECTED PASS]", + } + if self.verbosity > 0: self.stream.writeln(single_line_delim) - self.stream.writeln("%-73s%s" % (self.getDescription(test), - self.result_string)) + self.stream.writeln( + "%-72s%s%s" + % ( + self.getDescription(test), + self.result_string, + result_code_to_suffix[self.result_code], + ) + ) self.stream.writeln(single_line_delim) else: - self.stream.writeln("%-73s%s" % (self.getDescription(test), - self.result_string)) + self.stream.writeln( + "%-67s %4.2f %s%s" + % ( + self.getDescription(test), + time.time() - self.start_test, + self.result_string, + result_code_to_suffix[self.result_code], + ) + ) + + self.send_result_through_pipe(test, TestResultCode.TEST_RUN) def printErrors(self): """ Print errors from running the test case """ - self.stream.writeln() - self.printErrorList('ERROR', self.errors) - self.printErrorList('FAIL', self.failures) + if len(self.errors) > 0 or len(self.failures) > 0: + self.stream.writeln() + self.printErrorList("ERROR", self.errors) + self.printErrorList("FAIL", self.failures) + + # ^^ that is the last output from unittest before summary + if not self.runner.print_summary: + devnull = unittest.runner._WritelnDecorator(open(os.devnull, "w")) + self.stream = devnull + self.runner.stream = devnull def printErrorList(self, flavour, errors): """ @@ -992,107 +1972,47 @@ class VppTestResult(unittest.TestResult): """ for test, err in errors: self.stream.writeln(double_line_delim) - self.stream.writeln("%s: %s" % - (flavour, self.getDescription(test))) + self.stream.writeln("%s: %s" % (flavour, self.getDescription(test))) self.stream.writeln(single_line_delim) self.stream.writeln("%s" % err) -class Filter_by_test_option: - def __init__(self, filter_file_name, filter_class_name, filter_func_name): - self.filter_file_name = filter_file_name - self.filter_class_name = filter_class_name - self.filter_func_name = filter_func_name - - def __call__(self, file_name, class_name, func_name): - if self.filter_file_name and file_name != self.filter_file_name: - return False - if self.filter_class_name and class_name != self.filter_class_name: - return False - if self.filter_func_name and func_name != self.filter_func_name: - return False - return True - - class VppTestRunner(unittest.TextTestRunner): """ A basic test runner implementation which prints results to standard error. """ + @property def resultclass(self): """Class maintaining the results of the tests""" return VppTestResult - def __init__(self, keep_alive_pipe=None, failed_pipe=None, - stream=sys.stderr, descriptions=True, - verbosity=1, failfast=False, buffer=False, resultclass=None): + def __init__( + self, + keep_alive_pipe=None, + descriptions=True, + verbosity=1, + result_pipe=None, + failfast=False, + buffer=False, + resultclass=None, + print_summary=True, + **kwargs, + ): # ignore stream setting here, use hard-coded stdout to be in sync # with prints from VppTestCase methods ... 
- super(VppTestRunner, self).__init__(sys.stdout, descriptions, - verbosity, failfast, buffer, - resultclass) - reporter = KeepAliveReporter() - reporter.pipe = keep_alive_pipe - # this is super-ugly, but very simple to implement and works as long - # as we run only one test at the same time - VppTestResult.test_framework_failed_pipe = failed_pipe - - test_option = "TEST" - - def parse_test_option(self): - try: - f = os.getenv(self.test_option) - except: - f = None - filter_file_name = None - filter_class_name = None - filter_func_name = None - if f: - if '.' in f: - parts = f.split('.') - if len(parts) > 3: - raise Exception("Unrecognized %s option: %s" % - (self.test_option, f)) - if len(parts) > 2: - if parts[2] not in ('*', ''): - filter_func_name = parts[2] - if parts[1] not in ('*', ''): - filter_class_name = parts[1] - if parts[0] not in ('*', ''): - if parts[0].startswith('test_'): - filter_file_name = parts[0] - else: - filter_file_name = 'test_%s' % parts[0] - else: - if f.startswith('test_'): - filter_file_name = f - else: - filter_file_name = 'test_%s' % f - return filter_file_name, filter_class_name, filter_func_name + super(VppTestRunner, self).__init__( + sys.stdout, descriptions, verbosity, failfast, buffer, resultclass, **kwargs + ) + KeepAliveReporter.pipe = keep_alive_pipe - @staticmethod - def filter_tests(tests, filter_cb): - result = unittest.suite.TestSuite() - for t in tests: - if isinstance(t, unittest.suite.TestSuite): - # this is a bunch of tests, recursively filter... - x = filter_tests(t, filter_cb) - if x.countTestCases() > 0: - result.addTest(x) - elif isinstance(t, unittest.TestCase): - # this is a single test - parts = t.id().split('.') - # t.id() for common cases like this: - # test_classifier.TestClassifier.test_acl_ip - # apply filtering only if it is so - if len(parts) == 3: - if not filter_cb(parts[0], parts[1], parts[2]): - continue - result.addTest(t) - else: - # unexpected object, don't touch it - result.addTest(t) - return result + self.orig_stream = self.stream + self.resultclass.test_framework_result_pipe = result_pipe + + self.print_summary = print_summary + + def _makeResult(self): + return self.resultclass(self.stream, self.descriptions, self.verbosity, self) def run(self, test): """ @@ -1102,45 +2022,133 @@ class VppTestRunner(unittest.TextTestRunner): """ faulthandler.enable() # emit stack trace to stderr if killed by signal - print("Running tests using custom test runner") # debug message - filter_file, filter_class, filter_func = self.parse_test_option() - print("Active filters: file=%s, class=%s, function=%s" % ( - filter_file, filter_class, filter_func)) - filter_cb = Filter_by_test_option( - filter_file, filter_class, filter_func) - filtered = self.filter_tests(test, filter_cb) - print("%s out of %s tests match specified filters" % ( - filtered.countTestCases(), test.countTestCases())) - if not running_extended_tests(): - print("Not running extended tests (some tests will be skipped)") - return super(VppTestRunner, self).run(filtered) + + result = super(VppTestRunner, self).run(test) + if not self.print_summary: + self.stream = self.orig_stream + result.stream = self.orig_stream + return result class Worker(Thread): - def __init__(self, args, logger): + def __init__(self, executable_args, logger, env=None, *args, **kwargs): + super(Worker, self).__init__(*args, **kwargs) self.logger = logger - self.args = args + self.args = executable_args + if hasattr(self, "testcase") and self.testcase.debug_all: + if self.testcase.debug_gdbserver: + 
self.args = [
+                    "/usr/bin/gdbserver",
+                    "localhost:{port}".format(port=self.testcase.gdbserver_port),
+                ] + args
+            elif self.testcase.debug_gdb and hasattr(self, "wait_for_gdb"):
+                self.args.append(self.wait_for_gdb)
+        self.app_bin = executable_args[0]
+        self.app_name = os.path.basename(self.app_bin)
+        if hasattr(self, "role"):
+            self.app_name += " {role}".format(role=self.role)
+        self.process = None
         self.result = None
-        super(Worker, self).__init__()
+        env = {} if env is None else env
+        self.env = copy.deepcopy(env)
+
+    def wait_for_enter(self):
+        if not hasattr(self, "testcase"):
+            return
+        if self.testcase.debug_all and self.testcase.debug_gdbserver:
+            print()
+            print(double_line_delim)
+            print(
+                "Spawned GDB Server for '{app}' with PID: {pid}".format(
+                    app=self.app_name, pid=self.process.pid
+                )
+            )
+        elif self.testcase.debug_all and self.testcase.debug_gdb:
+            print()
+            print(double_line_delim)
+            print(
+                "Spawned '{app}' with PID: {pid}".format(
+                    app=self.app_name, pid=self.process.pid
+                )
+            )
+        else:
+            return
+        print(single_line_delim)
+        print("You can debug '{app}' using:".format(app=self.app_name))
+        if self.testcase.debug_gdbserver:
+            print(
+                "sudo gdb "
+                + self.app_bin
+                + " -ex 'target remote localhost:{port}'".format(
+                    port=self.testcase.gdbserver_port
+                )
+            )
+            print(
+                "Now is the time to attach gdb by running the above "
+                "command, set up breakpoints etc., then resume from "
+                "within gdb by issuing the 'continue' command"
+            )
+            self.testcase.gdbserver_port += 1
+        elif self.testcase.debug_gdb:
+            print(
+                "sudo gdb "
+                + self.app_bin
+                + " -ex 'attach {pid}'".format(pid=self.process.pid)
+            )
+            print(
+                "Now is the time to attach gdb by running the above "
+                "command and set up breakpoints etc., then resume from"
+                " within gdb by issuing the 'continue' command"
+            )
+        print(single_line_delim)
+        input("Press ENTER to continue running the testcase...")
 
     def run(self):
         executable = self.args[0]
-        self.logger.debug("Running executable w/args `%s'" % self.args)
+        if not os.path.exists(executable) or not os.access(
+            executable, os.F_OK | os.X_OK
+        ):
+            # Exit code that means some system file did not exist,
+            # could not be opened, or had some other kind of error.
+            self.result = os.EX_OSFILE
+            raise EnvironmentError(
+                "executable '%s' is not found or executable." % executable
+            )
+        self.logger.debug(
+            "Running executable '{app}': '{cmd}'".format(
+                app=self.app_name, cmd=" ".join(self.args)
+            )
+        )
         env = os.environ.copy()
+        env.update(self.env)
         env["CK_LOG_FILE_NAME"] = "-"
         self.process = subprocess.Popen(
-            self.args, shell=False, env=env, preexec_fn=os.setpgrp,
-            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            ["stdbuf", "-o0", "-e0"] + self.args,
+            shell=False,
+            env=env,
+            preexec_fn=os.setpgrp,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
+        self.wait_for_enter()
         out, err = self.process.communicate()
-        self.logger.debug("Finished running `%s'" % executable)
+        self.logger.debug("Finished running `{app}'".format(app=self.app_name))
         self.logger.info("Return code is `%s'" % self.process.returncode)
         self.logger.info(single_line_delim)
-        self.logger.info("Executable `%s' wrote to stdout:" % executable)
+        self.logger.info(
+            "Executable `{app}' wrote to stdout:".format(app=self.app_name)
+        )
         self.logger.info(single_line_delim)
-        self.logger.info(out)
+        self.logger.info(out.decode("utf-8"))
         self.logger.info(single_line_delim)
-        self.logger.info("Executable `%s' wrote to stderr:" % executable)
+        self.logger.info(
+            "Executable `{app}' wrote to stderr:".format(app=self.app_name)
+        )
         self.logger.info(single_line_delim)
-        self.logger.error(err)
+        self.logger.info(err.decode("utf-8"))
         self.logger.info(single_line_delim)
         self.result = self.process.returncode
+
+
+if __name__ == "__main__":
+    pass
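
Illustrative usage (not part of the diff above): a minimal sketch of a test module exercising the send/expect helpers this revision adds to VppTestCase. Only VppTestCase, VppTestRunner, send_and_expect and send_and_assert_no_replies come from framework.py itself; the class name, interface count, addresses and packet contents below are assumptions made for the example.

# test_example.py -- hypothetical sketch, not part of framework.py or this commit
import unittest

from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.packet import Raw

from framework import VppTestCase, VppTestRunner


class TestSendExpectSketch(VppTestCase):
    """Sketch: exercising the send_and_expect helpers"""

    @classmethod
    def setUpClass(cls):
        super(TestSendExpectSketch, cls).setUpClass()
        # two packet-generator interfaces, IPv4 configured, neighbors resolved
        cls.create_pg_interfaces(range(2))
        for i in cls.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.resolve_arp()

    def test_forwarding(self):
        # packets from pg0's peer towards pg1's peer address
        pkts = [
            Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)
            / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)
            / UDP(sport=1234, dport=1234)
            / Raw(b"\xa5" * 100)
        ] * 17

        # send on pg0, expect the same number of packets captured on pg1;
        # msg is logged next to the packet trace for easier debugging
        rx = self.send_and_expect(self.pg0, pkts, self.pg1, msg="basic forwarding")
        self.assertEqual(len(pkts), len(rx))

        # traffic towards an unrouted destination must not show up anywhere
        drops = [
            Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)
            / IP(src=self.pg0.remote_ip4, dst="192.0.2.254")
            / UDP(sport=1234, dport=1234)
        ]
        self.send_and_assert_no_replies(self.pg0, drops, remark="no route")


if __name__ == "__main__":
    unittest.main(testRunner=VppTestRunner)

The same helpers also accept a stats_diff argument, which is snapshotted before the send and verified afterwards via snapshot_stats/compare_stats_with_snapshot as shown in the hunks above; it is omitted from the sketch because its expected dictionary layout is not fully visible in this part of the diff.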