-#!/usr/bin/env python
+#!/usr/bin/env python3
from __future__ import print_function
import gc
import sys
import os
import select
+import signal
import unittest
import tempfile
import time
import faulthandler
import random
import copy
+import psutil
+import platform
from collections import deque
from threading import Thread, Event
from inspect import getdoc, isclass
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
+
+import scapy.compat
from scapy.packet import Raw
-from hook import StepHook, PollHook, VppDiedError
+import hook as hookmodule
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
+from vpp_bvi_interface import VppBviInterface
from vpp_papi_provider import VppPapiProvider
+from vpp_papi.vpp_stats import VPPStats
+from vpp_papi.vpp_transport_shmem import VppTransportShmemIOError
from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
- getLogger, colorize
+ get_logger, colorize
from vpp_object import VppObjectRegistry
-from util import ppp
+from util import ppp, is_core_present
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
from scapy.layers.inet6 import ICMPv6EchoReply
+
if os.name == 'posix' and sys.version_info[0] < 3:
# using subprocess32 is recommended by python official documentation
# @ https://docs.python.org/2/library/subprocess.html
else:
import subprocess
+# Python2/3 compatible
+try:
+ input = raw_input
+except NameError:
+ pass
+
+PASS = 0
+FAIL = 1
+ERROR = 2
+SKIP = 3
+TEST_RUN = 4
+
+
+class BoolEnvironmentVariable(object):
+    """Lazy boolean view of an environment variable.
+
+    Truthiness is re-evaluated on every bool() call via os.getenv(),
+    so changes to the environment after construction are picked up.
+    Comparison against true_values is case-insensitive.
+    """
+
+    def __init__(self, env_var_name, default='n', true_values=None):
+        # name of the environment variable to consult
+        self.name = env_var_name
+        # value assumed when the variable is unset
+        self.default = default
+        # lowercase strings that count as "true"
+        self.true_values = true_values if true_values is not None else \
+            ("y", "yes", "1")
+
+    def __bool__(self):
+        # evaluated lazily on each truth test
+        return os.getenv(self.name, self.default).lower() in self.true_values
+
+    # Python 2 spells the truth-test hook __nonzero__
+    if sys.version_info[0] == 2:
+        __nonzero__ = __bool__
+
+    def __repr__(self):
+        return 'BoolEnvironmentVariable(%r, default=%r, true_values=%r)' % \
+            (self.name, self.default, self.true_values)
-debug_framework = False
-if os.getenv('TEST_DEBUG', "0") == "1":
- debug_framework = True
- import debug_internal
+debug_framework = BoolEnvironmentVariable('TEST_DEBUG')
+if debug_framework:
+ import debug_internal
"""
Test framework module.
"""
+class VppDiedError(Exception):
+    """ exception for reporting that the subprocess has died."""
+
+    # reverse map: signal number -> signal name (e.g. 9 -> 'SIGKILL'),
+    # built from the signal module, excluding SIG_* handler constants
+    signals_by_value = {v: k for k, v in signal.__dict__.items() if
+                        k.startswith('SIG') and not k.startswith('SIG_')}
+
+    def __init__(self, rv=None, testcase=None, method_name=None):
+        # rv: subprocess return code; negative values mean "killed by signal"
+        self.rv = rv
+        self.signal_name = None
+        self.testcase = testcase
+        self.method_name = method_name
+
+        try:
+            # -rv is the signal number when the process was signal-killed;
+            # TypeError covers rv=None, KeyError covers unknown values
+            self.signal_name = VppDiedError.signals_by_value[-rv]
+        except (KeyError, TypeError):
+            pass
+
+        if testcase is None and method_name is None:
+            in_msg = ''
+        else:
+            in_msg = 'running %s.%s ' % (testcase, method_name)
+
+        # NOTE(review): '%d' % self.rv raises TypeError when rv is None
+        # (the default) — confirm all raisers pass a numeric return code
+        msg = "VPP subprocess died %sunexpectedly with return code: %d%s." % (
+            in_msg,
+            self.rv,
+            ' [%s]' % (self.signal_name if
+                       self.signal_name is not None else ''))
+        super(VppDiedError, self).__init__(msg)
+
+
class _PacketInfo(object):
"""Private class to create packet info object.
""" pump output from vpp stdout/stderr to proper queues """
stdout_fragment = ""
stderr_fragment = ""
- while not testclass.pump_thread_stop_flag.wait(0):
+ while not testclass.pump_thread_stop_flag.is_set():
readable = select.select([testclass.vpp.stdout.fileno(),
testclass.vpp.stderr.fileno(),
testclass.pump_thread_wakeup_pipe[0]],
if testclass.vpp.stdout.fileno() in readable:
read = os.read(testclass.vpp.stdout.fileno(), 102400)
if len(read) > 0:
- split = read.splitlines(True)
+ split = read.decode('ascii',
+ errors='backslashreplace').splitlines(True)
if len(stdout_fragment) > 0:
split[0] = "%s%s" % (stdout_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
if testclass.vpp.stderr.fileno() in readable:
read = os.read(testclass.vpp.stderr.fileno(), 102400)
if len(read) > 0:
- split = read.splitlines(True)
+ split = read.decode('ascii',
+ errors='backslashreplace').splitlines(True)
if len(stderr_fragment) > 0:
split[0] = "%s%s" % (stderr_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
else:
limit = -1
stderr_fragment = split[-1]
+
testclass.vpp_stderr_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.debug(
"VPP STDERR: %s" % line.rstrip("\n"))
- # ignoring the dummy pipe here intentionally - the flag will take care
- # of properly terminating the loop
+ # ignoring the dummy pipe here intentionally - the
+ # flag will take care of properly terminating the loop
+
+
+def _is_skip_aarch64_set():
+    # lazy boolean view of the SKIP_AARCH64 environment variable
+    return BoolEnvironmentVariable('SKIP_AARCH64')
+
+
+# module-level singleton; truthiness is tested where skips are decided
+is_skip_aarch64_set = _is_skip_aarch64_set()
+
+
+def _is_platform_aarch64():
+    # True when the interpreter runs on an aarch64 machine
+    return platform.machine() == 'aarch64'
+
+
+# evaluated once at import time; the platform cannot change mid-run
+is_platform_aarch64 = _is_platform_aarch64()
+
+def _running_extended_tests():
+    # lazy boolean view of the EXTENDED_TESTS environment variable
+    return BoolEnvironmentVariable("EXTENDED_TESTS")
-def running_extended_tests():
-    s = os.getenv("EXTENDED_TESTS", "n")
-    return True if s.lower() in ("y", "yes", "1") else False
+# replaces the former running_extended_tests() function; callers now
+# test this object's truthiness instead of calling it
+running_extended_tests = _running_extended_tests()
-def running_on_centos():
+
+def _running_on_centos():
+    # OS_ID is exported by the test harness environment; empty when unset
    os_id = os.getenv("OS_ID", "")
    return True if "centos" in os_id.lower() else False
+# plain boolean, evaluated once at import time
+running_on_centos = _running_on_centos()
+
+
class KeepAliveReporter(object):
"""
Singleton object which reports test start to parent process
def __init__(self):
self.__dict__ = self._shared_state
+ self._pipe = None
@property
def pipe(self):
@pipe.setter
def pipe(self, pipe):
- if hasattr(self, '_pipe'):
+ if self._pipe is not None:
raise Exception("Internal error - pipe should only be set once.")
self._pipe = pipe
- def send_keep_alive(self, test):
+ def send_keep_alive(self, test, desc=None):
"""
Write current test tmpdir & desc to keep-alive pipe to signal liveness
"""
return
if isclass(test):
- desc = test.__name__
+ desc = '%s (%s)' % (desc, unittest.util.strclass(test))
else:
- desc = test.shortDescription()
- if not desc:
- desc = str(test)
+ desc = test.id()
self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
classes. It provides methods to create and run test case.
"""
+ extra_vpp_punt_config = []
+ extra_vpp_plugin_config = []
+
@property
def packet_infos(self):
"""List of packet infos"""
    @classmethod
    def set_debug_flags(cls, d):
+        """Parse the DEBUG option string and set class debug flags.
+
+        :param d: value of the DEBUG environment variable, or None.
+        :raises Exception: on an unrecognized DEBUG value.
+        """
+        # port for an external gdbserver attach; bumped per debugged process
+        cls.gdbserver_port = 7777
        cls.debug_core = False
        cls.debug_gdb = False
        cls.debug_gdbserver = False
+        # "-all" variants additionally set debug_all
+        cls.debug_all = False
        if d is None:
            return
        dl = d.lower()
        if dl == "core":
            cls.debug_core = True
-        elif dl == "gdb":
+        elif dl == "gdb" or dl == "gdb-all":
            cls.debug_gdb = True
-        elif dl == "gdbserver":
+        elif dl == "gdbserver" or dl == "gdbserver-all":
            cls.debug_gdbserver = True
        else:
            raise Exception("Unrecognized DEBUG option: '%s'" % d)
+        if dl == "gdb-all" or dl == "gdbserver-all":
+            cls.debug_all = True
+
+    @staticmethod
+    def get_least_used_cpu():
+        """Pick a CPU with the fewest running 'vpp_main' processes.
+
+        Buckets CPUs by how many vpp_main processes are currently pinned
+        to them (bucket 0 = unused, bucket 1 = one vpp, ...), then returns
+        a random CPU from the least-loaded non-empty bucket.
+
+        :returns: a CPU number suitable for a new VPP main-core.
+        """
+        # bucket list; index == number of vpp_main processes on that CPU
+        cpu_usage_list = [set(range(psutil.cpu_count()))]
+        vpp_processes = [p for p in psutil.process_iter(attrs=['pid', 'name'])
+                         if 'vpp_main' == p.info['name']]
+        for vpp_process in vpp_processes:
+            for cpu_usage_set in cpu_usage_list:
+                try:
+                    cpu_num = vpp_process.cpu_num()
+                    if cpu_num in cpu_usage_set:
+                        # move this CPU one bucket up (one more user)
+                        cpu_usage_set_index = cpu_usage_list.index(
+                            cpu_usage_set)
+                        if cpu_usage_set_index == len(cpu_usage_list) - 1:
+                            cpu_usage_list.append({cpu_num})
+                        else:
+                            cpu_usage_list[cpu_usage_set_index + 1].add(
+                                cpu_num)
+                        cpu_usage_set.remove(cpu_num)
+                        break
+                except psutil.NoSuchProcess:
+                    # process exited while we were iterating; ignore
+                    pass
+
+        # first non-empty bucket holds the least-used CPUs
+        for cpu_usage_set in cpu_usage_list:
+            if len(cpu_usage_set) > 0:
+                min_usage_set = cpu_usage_set
+                break
+
+        return random.choice(tuple(min_usage_set))
@classmethod
def setUpConstants(cls):
""" Set-up the test case class based on environment variables """
- s = os.getenv("STEP", "n")
- cls.step = True if s.lower() in ("y", "yes", "1") else False
+ cls.step = BoolEnvironmentVariable('STEP')
d = os.getenv("DEBUG", None)
+ # inverted case to handle '' == True
c = os.getenv("CACHE_OUTPUT", "1")
cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
cls.set_debug_flags(d)
- cls.vpp_bin = os.getenv('VPP_TEST_BIN', "vpp")
- cls.plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
+ cls.vpp_bin = os.getenv('VPP_BIN', "vpp")
+ cls.plugin_path = os.getenv('VPP_PLUGIN_PATH')
+ cls.test_plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
plugin_path = None
if cls.plugin_path is not None:
coredump_size = "coredump-size %s" % size
if coredump_size is None:
coredump_size = "coredump-size unlimited"
+
+ cpu_core_number = cls.get_least_used_cpu()
+ if not hasattr(cls, "worker_config"):
+ cls.worker_config = ""
+
cls.vpp_cmdline = [cls.vpp_bin, "unix",
"{", "nodaemon", debug_cli, "full-coredump",
- coredump_size, "}", "api-trace", "{", "on", "}",
- "api-segment", "{", "prefix", cls.shm_prefix, "}",
- "plugins", "{", "plugin", "dpdk_plugin.so", "{",
- "disable", "}", "}", ]
+ coredump_size, "runtime-dir", cls.tempdir, "}",
+ "api-trace", "{", "on", "}", "api-segment", "{",
+ "prefix", cls.shm_prefix, "}", "cpu", "{",
+ "main-core", str(cpu_core_number),
+ cls.worker_config, "}",
+ "statseg", "{", "socket-name", cls.stats_sock, "}",
+ "socksvr", "{", "socket-name", cls.api_sock, "}",
+ "plugins",
+ "{", "plugin", "dpdk_plugin.so", "{", "disable",
+ "}", "plugin", "rdma_plugin.so", "{", "disable",
+ "}", "plugin", "unittest_plugin.so", "{", "enable",
+ "}"] + cls.extra_vpp_plugin_config + ["}", ]
+ if cls.extra_vpp_punt_config is not None:
+ cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
if plugin_path is not None:
cls.vpp_cmdline.extend(["plugin_path", plugin_path])
- cls.logger.info("vpp_cmdline: %s" % cls.vpp_cmdline)
+ if cls.test_plugin_path is not None:
+ cls.vpp_cmdline.extend(["test_plugin_path", cls.test_plugin_path])
+
+ cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
+ cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
@classmethod
def wait_for_enter(cls):
cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
return
print(single_line_delim)
- print("You can debug the VPP using e.g.:")
+ print("You can debug VPP using:")
if cls.debug_gdbserver:
- print("gdb " + cls.vpp_bin + " -ex 'target remote localhost:7777'")
- print("Now is the time to attach a gdb by running the above "
- "command, set up breakpoints etc. and then resume VPP from "
+ print("sudo gdb " + cls.vpp_bin +
+ " -ex 'target remote localhost:{port}'"
+ .format(port=cls.gdbserver_port))
+ print("Now is the time to attach gdb by running the above "
+ "command, set up breakpoints etc., then resume VPP from "
"within gdb by issuing the 'continue' command")
+ cls.gdbserver_port += 1
elif cls.debug_gdb:
- print("gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
- print("Now is the time to attach a gdb by running the above "
- "command and set up breakpoints etc.")
+ print("sudo gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
+ print("Now is the time to attach gdb by running the above "
+ "command and set up breakpoints etc., then resume VPP from"
+ " within gdb by issuing the 'continue' command")
print(single_line_delim)
- raw_input("Press ENTER to continue running the testcase...")
+ input("Press ENTER to continue running the testcase...")
@classmethod
def run_vpp(cls):
raise Exception("gdbserver binary '%s' does not exist or is "
"not executable" % gdbserver)
- cmdline = [gdbserver, 'localhost:7777'] + cls.vpp_cmdline
+ cmdline = [gdbserver, 'localhost:{port}'
+ .format(port=cls.gdbserver_port)] + cls.vpp_cmdline
cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
try:
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1)
+ except subprocess.CalledProcessError as e:
+ cls.logger.critical("Subprocess returned with non-0 return code: ("
+ "%s)", e.returncode)
+ raise
+ except OSError as e:
+ cls.logger.critical("Subprocess returned with OS error: "
+ "(%s) %s", e.errno, e.strerror)
+ raise
except Exception as e:
- cls.logger.critical("Couldn't start vpp: %s" % e)
+ cls.logger.exception("Subprocess returned unexpected from "
+ "%s:", cmdline)
raise
cls.wait_for_enter()
+    @classmethod
+    def wait_for_stats_socket(cls):
+        """Wait (up to 300s) for the VPP stats socket file to appear.
+
+        Under gdb/gdbserver debugging the loop never times out, since VPP
+        may be halted at a breakpoint indefinitely.  On timeout only a
+        critical log message is emitted; no exception is raised.
+        """
+        deadline = time.time() + 300
+        ok = False
+        while time.time() < deadline or \
+                cls.debug_gdb or cls.debug_gdbserver:
+            if os.path.exists(cls.stats_sock):
+                ok = True
+                break
+            cls.sleep(0.8)
+        if not ok:
+            cls.logger.critical("Couldn't stat : {}".format(cls.stats_sock))
+
+    @classmethod
+    def wait_for_coredump(cls):
+        """If a core file exists in tempdir, wait for its size to stabilize.
+
+        The kernel may still be writing the coredump; poll the file size
+        once per second (up to 60s) until two consecutive reads match.
+        Logs the outcome; never raises.
+        """
+        corefile = cls.tempdir + "/core"
+        if os.path.isfile(corefile):
+            cls.logger.error("Waiting for coredump to complete: %s", corefile)
+            curr_size = os.path.getsize(corefile)
+            deadline = time.time() + 60
+            ok = False
+            while time.time() < deadline:
+                cls.sleep(1)
+                size = curr_size
+                curr_size = os.path.getsize(corefile)
+                # size unchanged for a full second -> dump finished
+                if size == curr_size:
+                    ok = True
+                    break
+            if not ok:
+                cls.logger.error("Timed out waiting for coredump to complete:"
+                                 " %s", corefile)
+            else:
+                cls.logger.error("Coredump complete: %s, size %d",
+                                 corefile, curr_size)
+
@classmethod
def setUpClass(cls):
"""
Perform class setup before running the testcase
Remove shared memory files, start vpp and connect the vpp-api
"""
+ super(VppTestCase, cls).setUpClass()
gc.collect() # run garbage collection first
- random.seed()
- cls.logger = getLogger(cls.__name__)
+ cls.logger = get_logger(cls.__name__)
+ seed = os.environ["RND_SEED"]
+ random.seed(seed)
+ if hasattr(cls, 'parallel_handler'):
+ cls.logger.addHandler(cls.parallel_handler)
+ cls.logger.propagate = False
+
cls.tempdir = tempfile.mkdtemp(
prefix='vpp-unittest-%s-' % cls.__name__)
+ cls.stats_sock = "%s/stats.sock" % cls.tempdir
+ cls.api_sock = "%s/api.sock" % cls.tempdir
cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
cls.file_handler.setFormatter(
Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
datefmt="%H:%M:%S"))
cls.file_handler.setLevel(DEBUG)
cls.logger.addHandler(cls.file_handler)
- cls.shm_prefix = cls.tempdir.split("/")[-1]
+ cls.logger.debug("--- setUpClass() for %s called ---" %
+ cls.__name__)
+ cls.shm_prefix = os.path.basename(cls.tempdir)
os.chdir(cls.tempdir)
cls.logger.info("Temporary dir is %s, shm prefix is %s",
cls.tempdir, cls.shm_prefix)
+ cls.logger.debug("Random seed is %s" % seed)
cls.setUpConstants()
cls.reset_packet_infos()
cls._captures = []
- cls._zombie_captures = []
cls.verbose = 0
cls.vpp_dead = False
cls.registry = VppObjectRegistry()
# doesn't get called and we might end with a zombie vpp
try:
cls.run_vpp()
- cls.reporter.send_keep_alive(cls)
+ cls.reporter.send_keep_alive(cls, 'setUpClass')
+ VppTestResult.current_test_case_info = TestCaseInfo(
+ cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
cls.vpp_stdout_deque = deque()
cls.vpp_stderr_deque = deque()
cls.pump_thread_stop_flag = Event()
cls.pump_thread = Thread(target=pump_output, args=(cls,))
cls.pump_thread.daemon = True
cls.pump_thread.start()
- cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls)
+ if cls.debug_gdb or cls.debug_gdbserver:
+ read_timeout = 0
+ else:
+ read_timeout = 5
+ cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls,
+ read_timeout)
if cls.step:
- hook = StepHook(cls)
+ hook = hookmodule.StepHook(cls)
else:
- hook = PollHook(cls)
+ hook = hookmodule.PollHook(cls)
cls.vapi.register_hook(hook)
- cls.sleep(0.1, "after vpp startup, before initial poll")
+ cls.wait_for_stats_socket()
+ cls.statistics = VPPStats(socketname=cls.stats_sock)
try:
hook.poll_vpp()
except VppDiedError:
"VPP-API connection failed, did you forget "
"to 'continue' VPP from within gdb?", RED))
raise
- except Exception:
- try:
- cls.quit()
- except Exception:
- pass
+ except Exception as e:
+ cls.logger.debug("Exception connecting to VPP: %s" % e)
+
+ cls.quit()
raise
@classmethod
if (cls.debug_gdbserver or cls.debug_gdb) and hasattr(cls, 'vpp'):
cls.vpp.poll()
if cls.vpp.returncode is None:
+ print()
print(double_line_delim)
print("VPP or GDB server is still running")
print(single_line_delim)
- raw_input("When done debugging, press ENTER to kill the "
- "process and finish running the testcase...")
-
- os.write(cls.pump_thread_wakeup_pipe[1], 'ding dong wake up')
- cls.pump_thread_stop_flag.set()
+ input("When done debugging, press ENTER to kill the "
+ "process and finish running the testcase...")
+
+ # first signal that we want to stop the pump thread, then wake it up
+ if hasattr(cls, 'pump_thread_stop_flag'):
+ cls.pump_thread_stop_flag.set()
+ if hasattr(cls, 'pump_thread_wakeup_pipe'):
+ os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up')
if hasattr(cls, 'pump_thread'):
cls.logger.debug("Waiting for pump thread to stop")
cls.pump_thread.join()
if hasattr(cls, 'vpp'):
if hasattr(cls, 'vapi'):
+ cls.logger.debug("Disconnecting class vapi client on %s",
+ cls.__name__)
cls.vapi.disconnect()
+ cls.logger.debug("Deleting class vapi attribute on %s",
+ cls.__name__)
del cls.vapi
cls.vpp.poll()
if cls.vpp.returncode is None:
+ cls.wait_for_coredump()
cls.logger.debug("Sending TERM to vpp")
cls.vpp.terminate()
cls.logger.debug("Waiting for vpp to die")
cls.vpp.communicate()
+ cls.logger.debug("Deleting class vpp attribute on %s",
+ cls.__name__)
del cls.vpp
if cls.vpp_startup_failed:
@classmethod
def tearDownClass(cls):
""" Perform final cleanup after running all tests in this test-case """
+ cls.logger.debug("--- tearDownClass() for %s called ---" %
+ cls.__name__)
+ cls.reporter.send_keep_alive(cls, 'tearDownClass')
cls.quit()
cls.file_handler.close()
cls.reset_packet_infos()
if debug_framework:
debug_internal.on_tear_down_class(cls)
+ def show_commands_at_teardown(self):
+ """ Allow subclass specific teardown logging additions."""
+ self.logger.info("--- No test specific show commands provided. ---")
+
def tearDown(self):
""" Show various debug prints after each test """
self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
- if not self.vpp_dead:
- self.logger.debug(self.vapi.cli("show trace"))
- self.logger.info(self.vapi.ppcli("show interface"))
- self.logger.info(self.vapi.ppcli("show hardware"))
- self.logger.info(self.vapi.ppcli("show error"))
- self.logger.info(self.vapi.ppcli("show run"))
- self.logger.info(self.vapi.ppcli("show log"))
- self.registry.remove_vpp_config(self.logger)
+
+ try:
+ if not self.vpp_dead:
+ self.logger.debug(self.vapi.cli("show trace max 1000"))
+ self.logger.info(self.vapi.ppcli("show interface"))
+ self.logger.info(self.vapi.ppcli("show hardware"))
+ self.logger.info(self.statistics.set_errors_str())
+ self.logger.info(self.vapi.ppcli("show run"))
+ self.logger.info(self.vapi.ppcli("show log"))
+ self.logger.info(self.vapi.ppcli("show bihash"))
+ self.logger.info("Logging testcase specific show commands.")
+ self.show_commands_at_teardown()
+ self.registry.remove_vpp_config(self.logger)
# Save/Dump VPP api trace log
- api_trace = "vpp_api_trace.%s.log" % self._testMethodName
+ m = self._testMethodName
+ api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
tmp_api_trace = "/tmp/%s" % api_trace
vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
os.rename(tmp_api_trace, vpp_api_trace_log)
self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
vpp_api_trace_log))
+ except VppTransportShmemIOError:
+ self.logger.debug("VppTransportShmemIOError: Vpp dead. "
+ "Cannot log show commands.")
+ self.vpp_dead = True
else:
self.registry.unregister_all(self.logger)
def setUp(self):
""" Clear trace before running each test"""
+ super(VppTestCase, self).setUp()
self.reporter.send_keep_alive(self)
- self.logger.debug("--- setUp() for %s.%s(%s) called ---" %
- (self.__class__.__name__, self._testMethodName,
- self._testMethodDoc))
if self.vpp_dead:
- raise Exception("VPP is dead when setting up the test")
+
+ raise VppDiedError(rv=None, testcase=self.__class__.__name__,
+ method_name=self._testMethodName)
self.sleep(.1, "during setUp")
self.vpp_stdout_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
""" Register a capture in the testclass """
# add to the list of captures with current timestamp
cls._captures.append((time.time(), cap_name))
- # filter out from zombies
- cls._zombie_captures = [(stamp, name)
- for (stamp, name) in cls._zombie_captures
- if name != cap_name]
@classmethod
- def pg_start(cls):
- """ Remove any zombie captures and enable the packet generator """
- # how long before capture is allowed to be deleted - otherwise vpp
- # crashes - 100ms seems enough (this shouldn't be needed at all)
- capture_ttl = 0.1
- now = time.time()
- for stamp, cap_name in cls._zombie_captures:
- wait = stamp + capture_ttl - now
- if wait > 0:
- cls.sleep(wait, "before deleting capture %s" % cap_name)
- now = time.time()
- cls.logger.debug("Removing zombie capture %s" % cap_name)
- cls.vapi.cli('packet-generator delete %s' % cap_name)
+    def get_vpp_time(cls):
+        """Return VPP's internal clock as a float, via 'show clock' CLI."""
+        return float(cls.vapi.cli('show clock').replace("Time now ", ""))
+
+    @classmethod
+    def sleep_on_vpp_time(cls, sec):
+        """ Sleep according to time in VPP world """
+        # On a busy system with many processes
+        # we might end up with VPP time being slower than real world
+        # So take that into account when waiting for VPP to do something
+        start_time = cls.get_vpp_time()
+        while cls.get_vpp_time() - start_time < sec:
+            cls.sleep(0.1)
- cls.vapi.cli("trace add pg-input 50") # 50 is maximum
+    @classmethod
+    def pg_start(cls):
+        """ Enable the PG, wait till it is done, then clean up """
+        # 1000-packet trace buffer for the pg-input node
+        cls.vapi.cli("trace add pg-input 1000")
        cls.vapi.cli('packet-generator enable')
-        cls._zombie_captures = cls._captures
+        # PG, when starts, runs to completion -
+        # so let's avoid a race condition,
+        # and wait a little till it's done.
+        # Then clean it up - and then be gone.
+        deadline = time.time() + 300
+        # 'show packet-generator' lists "Yes" while a stream is still active
+        while cls.vapi.cli('show packet-generator').find("Yes") != -1:
+            cls.sleep(0.01)  # yield
+            if time.time() > deadline:
+                cls.logger.error("Timeout waiting for pg to stop")
+                break
+        # delete all registered captures now that the PG run has finished
+        for stamp, cap_name in cls._captures:
+            cls.vapi.cli('packet-generator delete %s' % cap_name)
        cls._captures = []
@classmethod
- def create_pg_interfaces(cls, interfaces):
+ def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
"""
Create packet-generator interfaces.
"""
result = []
for i in interfaces:
- intf = VppPGInterface(cls, i)
+ intf = VppPGInterface(cls, i, gso, gso_size)
setattr(cls, intf.name, intf)
result.append(intf)
cls.pg_interfaces = result
return result
@classmethod
- def create_loopback_interfaces(cls, interfaces):
+ def create_loopback_interfaces(cls, count):
"""
Create loopback interfaces.
- :param interfaces: iterable indexes of the interfaces.
+ :param count: number of interfaces created.
:returns: List of created interfaces.
"""
- result = []
- for i in interfaces:
- intf = VppLoInterface(cls, i)
+ result = [VppLoInterface(cls) for i in range(count)]
+ for intf in result:
setattr(cls, intf.name, intf)
- result.append(intf)
cls.lo_interfaces = result
return result
+    @classmethod
+    def create_bvi_interfaces(cls, count):
+        """
+        Create BVI interfaces.
+
+        :param count: number of interfaces created.
+        :returns: List of created interfaces.
+        """
+        result = [VppBviInterface(cls) for i in range(count)]
+        for intf in result:
+            # expose each interface as a class attribute under its own name
+            setattr(cls, intf.name, intf)
+        cls.bvi_interfaces = result
+        return result
+
@staticmethod
def extend_packet(packet, size, padding=' '):
"""
packet_len = len(packet) + 4
extend = size - packet_len
if extend > 0:
- num = (extend / len(padding)) + 1
- packet[Raw].load += (padding * num)[:extend]
+ num = (extend // len(padding)) + 1
+ packet[Raw].load += (padding * num)[:extend].encode("ascii")
@classmethod
def reset_packet_infos(cls):
info.ip, info.proto)
@staticmethod
- def payload_to_info(payload):
+ def payload_to_info(payload, payload_field='load'):
"""
Convert packet payload to _PacketInfo object
:param payload: packet payload
-
+ :type payload: <class 'scapy.packet.Raw'>
+ :param payload_field: packet fieldname of payload "load" for
+ <class 'scapy.packet.Raw'>
+ :type payload_field: str
:returns: _PacketInfo object containing de-serialized data from payload
"""
- numbers = payload.split()
+ numbers = getattr(payload, payload_field).split()
info = _PacketInfo()
info.index = int(numbers[0])
info.src = int(numbers[1])
def assert_packet_checksums_valid(self, packet,
ignore_zero_udp_checksums=True):
- received = packet.__class__(str(packet))
- self.logger.debug(
- ppp("Verifying packet checksums for packet:", received))
+ received = packet.__class__(scapy.compat.raw(packet))
udp_layers = ['UDP', 'UDPerror']
checksum_fields = ['cksum', 'chksum']
checksums = []
counter = 0
- temp = received.__class__(str(received))
+ temp = received.__class__(scapy.compat.raw(received))
while True:
layer = temp.getlayer(counter)
if layer:
counter = counter + 1
if 0 == len(checksums):
return
- temp = temp.__class__(str(temp))
+ temp = temp.__class__(scapy.compat.raw(temp))
for layer, cf in checksums:
calc_sum = getattr(temp[layer], cf)
self.assert_equal(
received_packet_checksum = getattr(received_packet[layer], field_name)
if ignore_zero_checksum and 0 == received_packet_checksum:
return
- recalculated = received_packet.__class__(str(received_packet))
+ recalculated = received_packet.__class__(
+ scapy.compat.raw(received_packet))
delattr(recalculated[layer], field_name)
- recalculated = recalculated.__class__(str(recalculated))
+ recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
self.assert_equal(received_packet_checksum,
getattr(recalculated[layer], field_name),
"packet checksum on layer: %s" % layer)
if pkt.haslayer(ICMPv6EchoReply):
self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
+    def get_packet_counter(self, counter):
+        """Read a packet counter value.
+
+        :param counter: either a stats-segment path (starts with "/"),
+            read via the stats API, or an error-counter name matched
+            against the 'sh errors' CLI output.
+        :returns: the counter value (0 when a CLI counter is not found).
+        """
+        if counter.startswith("/"):
+            counter_value = self.statistics.get_counter(counter)
+        else:
+            counters = self.vapi.cli("sh errors").split('\n')
+            counter_value = 0
+            # skip the header line and the trailing line of the CLI output
+            for i in range(1, len(counters) - 1):
+                results = counters[i].split()
+                if results[1] == counter:
+                    counter_value = int(results[0])
+                    break
+        return counter_value
+
+    def assert_packet_counter_equal(self, counter, expected_value):
+        """Assert that a packet counter has the expected value."""
+        counter_value = self.get_packet_counter(counter)
+        self.assert_equal(counter_value, expected_value,
+                          "packet counter `%s'" % counter)
+
+    def assert_error_counter_equal(self, counter, expected_value):
+        """Assert that a stats-segment error counter has the expected value."""
+        counter_value = self.statistics.get_err_counter(counter)
+        self.assert_equal(counter_value, expected_value,
+                          "error counter `%s'" % counter)
+
@classmethod
def sleep(cls, timeout, remark=None):
+
+ # /* Allow sleep(0) to maintain win32 semantics, and as decreed
+ # * by Guido, only the main thread can be interrupted.
+ # */
+ # https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892 # noqa
+ if timeout == 0:
+ # yield quantum
+ if hasattr(os, 'sched_yield'):
+ os.sched_yield()
+ else:
+ time.sleep(0)
+ return
+
if hasattr(cls, 'logger'):
- cls.logger.debug("Starting sleep for %ss (%s)" % (timeout, remark))
+ cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
before = time.time()
time.sleep(timeout)
after = time.time()
- if after - before > 2 * timeout:
- cls.logger.error("unexpected time.sleep() result - "
- "slept for %ss instead of ~%ss!" % (
- after - before, timeout))
+ if hasattr(cls, 'logger') and after - before > 2 * timeout:
+ cls.logger.error("unexpected self.sleep() result - "
+ "slept for %es instead of ~%es!",
+ after - before, timeout)
if hasattr(cls, 'logger'):
cls.logger.debug(
- "Finished sleep (%s) - slept %ss (wanted %ss)" % (
- remark, after - before, timeout))
+ "Finished sleep (%s) - slept %es (wanted %es)",
+ remark, after - before, timeout)
- def send_and_assert_no_replies(self, intf, pkts, remark=""):
+    def pg_send(self, intf, pkts):
+        """Clear trace, queue pkts on intf, enable capture and start PG."""
        self.vapi.cli("clear trace")
        intf.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
-        timeout = 1
+
+    def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
+        """Send pkts and assert that no interface captured anything.
+
+        :param timeout: wait time for the first interface (default 1s);
+            subsequent interfaces are checked with a short 0.1s wait.
+        """
+        self.pg_send(intf, pkts)
+        if not timeout:
+            timeout = 1
        for i in self.pg_interfaces:
            i.get_capture(0, timeout=timeout)
            i.assert_nothing_captured(remark=remark)
        timeout = 0.1
- def send_and_expect(self, input, pkts, output):
- self.vapi.cli("clear trace")
- input.add_stream(pkts)
- self.pg_enable_capture(self.pg_interfaces)
- self.pg_start()
+    def send_and_expect(self, intf, pkts, output, n_rx=None):
+        """Send pkts on intf and return the capture from output.
+
+        :param n_rx: expected number of received packets
+            (defaults to len(pkts)).
+        """
+        if not n_rx:
+            n_rx = len(pkts)
+        self.pg_send(intf, pkts)
+        rx = output.get_capture(n_rx)
+        return rx
+
+    def send_and_expect_only(self, intf, pkts, output, timeout=None):
+        """Send pkts, expect them ONLY on output; assert all other
+        pg interfaces captured nothing.
+        """
+        self.pg_send(intf, pkts)
        rx = output.get_capture(len(pkts))
+        outputs = [output]
+        if not timeout:
+            timeout = 1
+        for i in self.pg_interfaces:
+            if i not in outputs:
+                i.get_capture(0, timeout=timeout)
+                i.assert_nothing_captured()
+                timeout = 0.1
+
        return rx
+ def runTest(self):
+ """ unittest calls runTest when TestCase is instantiated without a
+ test case. Use case: Writing unittests against VppTestCase"""
+ pass
-class TestCasePrinter(object):
- _shared_state = {}
- def __init__(self):
- self.__dict__ = self._shared_state
- if not hasattr(self, "_test_case_set"):
- self._test_case_set = set()
+def get_testcase_doc_name(test):
+    """Return the first docstring line of the test's class."""
+    return getdoc(test.__class__).splitlines()[0]
-    def print_test_case_heading_if_first_time(self, case):
-        if case.__class__ not in self._test_case_set:
-            print(double_line_delim)
-            print(colorize(getdoc(case.__class__).splitlines()[0], YELLOW))
-            print(double_line_delim)
-            self._test_case_set.add(case.__class__)
+
+
+def get_test_description(descriptions, test):
+    """Return the test's short description if available (and wanted),
+    otherwise its str() representation."""
+    short_description = test.shortDescription()
+    if descriptions and short_description:
+        return short_description
+    else:
+        return str(test)
+
+
+class TestCaseInfo(object):
+    """Plain record describing a running test case's VPP instance,
+    used by the result/reporting machinery."""
+
+    def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
+        self.logger = logger
+        self.tempdir = tempdir
+        self.vpp_pid = vpp_pid
+        self.vpp_bin_path = vpp_bin_path
+        # set later when a core-file crash is attributed to a test
+        self.core_crash_test = None
class VppTestResult(unittest.TestResult):
methods.
"""
- def __init__(self, stream, descriptions, verbosity):
+ failed_test_cases_info = set()
+ core_crash_test_cases_info = set()
+ current_test_case_info = None
+
+ def __init__(self, stream=None, descriptions=None, verbosity=None,
+ runner=None):
"""
:param stream File descriptor to store where to report test results.
Set to the standard error stream by default.
test case descriptions.
:param verbosity Integer variable to store required verbosity level.
"""
- unittest.TestResult.__init__(self, stream, descriptions, verbosity)
+ super(VppTestResult, self).__init__(stream, descriptions, verbosity)
self.stream = stream
self.descriptions = descriptions
self.verbosity = verbosity
self.result_string = None
- self.printer = TestCasePrinter()
+ self.runner = runner
def addSuccess(self, test):
"""
:param test:
"""
- if hasattr(test, 'logger'):
- test.logger.debug("--- addSuccess() %s.%s(%s) called"
- % (test.__class__.__name__,
- test._testMethodName,
- test._testMethodDoc))
+ if self.current_test_case_info:
+ self.current_test_case_info.logger.debug(
+ "--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__,
+ test._testMethodName,
+ test._testMethodDoc))
unittest.TestResult.addSuccess(self, test)
self.result_string = colorize("OK", GREEN)
+ self.send_result_through_pipe(test, PASS)
+
def addSkip(self, test, reason):
"""
Record a test skipped.
:param reason:
"""
- if hasattr(test, 'logger'):
- test.logger.debug("--- addSkip() %s.%s(%s) called, reason is %s"
- % (test.__class__.__name__,
- test._testMethodName,
- test._testMethodDoc,
- reason))
+ if self.current_test_case_info:
+ self.current_test_case_info.logger.debug(
+ "--- addSkip() %s.%s(%s) called, reason is %s" %
+ (test.__class__.__name__, test._testMethodName,
+ test._testMethodDoc, reason))
unittest.TestResult.addSkip(self, test, reason)
self.result_string = colorize("SKIP", YELLOW)
- def symlink_failed(self, test):
- logger = None
- if hasattr(test, 'logger'):
- logger = test.logger
- if hasattr(test, 'tempdir'):
+ self.send_result_through_pipe(test, SKIP)
+
+ def symlink_failed(self):
+ if self.current_test_case_info:
try:
- failed_dir = os.getenv('VPP_TEST_FAILED_DIR')
- link_path = '%s/%s-FAILED' % (failed_dir,
- test.tempdir.split("/")[-1])
- if logger:
- logger.debug("creating a link to the failed test")
- logger.debug("os.symlink(%s, %s)" %
- (test.tempdir, link_path))
- os.symlink(test.tempdir, link_path)
+ failed_dir = os.getenv('FAILED_DIR')
+ link_path = os.path.join(
+ failed_dir,
+ '%s-FAILED' %
+ os.path.basename(self.current_test_case_info.tempdir))
+ if self.current_test_case_info.logger:
+ self.current_test_case_info.logger.debug(
+ "creating a link to the failed test")
+ self.current_test_case_info.logger.debug(
+ "os.symlink(%s, %s)" %
+ (self.current_test_case_info.tempdir, link_path))
+ if os.path.exists(link_path):
+ if self.current_test_case_info.logger:
+ self.current_test_case_info.logger.debug(
+ 'symlink already exists')
+ else:
+ os.symlink(self.current_test_case_info.tempdir, link_path)
+
except Exception as e:
- if logger:
- logger.error(e)
+ if self.current_test_case_info.logger:
+ self.current_test_case_info.logger.error(e)
- def send_failure_through_pipe(self, test):
- if hasattr(self, 'test_framework_failed_pipe'):
- pipe = self.test_framework_failed_pipe
+ def send_result_through_pipe(self, test, result):
+ if hasattr(self, 'test_framework_result_pipe'):
+ pipe = self.test_framework_result_pipe
if pipe:
- pipe.send(test.__class__)
+ pipe.send((test.id(), result))
+
+ def log_error(self, test, err, fn_name):
+ if self.current_test_case_info:
+ if isinstance(test, unittest.suite._ErrorHolder):
+ test_name = test.description
+ else:
+ test_name = '%s.%s(%s)' % (test.__class__.__name__,
+ test._testMethodName,
+ test._testMethodDoc)
+ self.current_test_case_info.logger.debug(
+ "--- %s() %s called, err is %s" %
+ (fn_name, test_name, err))
+ self.current_test_case_info.logger.debug(
+ "formatted exception is:\n%s" %
+ "".join(format_exception(*err)))
+
+ def add_error(self, test, err, unittest_fn, error_type):
+ if error_type == FAIL:
+ self.log_error(test, err, 'addFailure')
+ error_type_str = colorize("FAIL", RED)
+ elif error_type == ERROR:
+ self.log_error(test, err, 'addError')
+ error_type_str = colorize("ERROR", RED)
+ else:
+ raise Exception('Error type %s cannot be used to record an '
+ 'error or a failure' % error_type)
+
+ unittest_fn(self, test, err)
+ if self.current_test_case_info:
+ self.result_string = "%s [ temp dir used by test case: %s ]" % \
+ (error_type_str,
+ self.current_test_case_info.tempdir)
+ self.symlink_failed()
+ self.failed_test_cases_info.add(self.current_test_case_info)
+ if is_core_present(self.current_test_case_info.tempdir):
+ if not self.current_test_case_info.core_crash_test:
+ if isinstance(test, unittest.suite._ErrorHolder):
+ test_name = str(test)
+ else:
+ test_name = "'{!s}' ({!s})".format(
+ get_testcase_doc_name(test), test.id())
+ self.current_test_case_info.core_crash_test = test_name
+ self.core_crash_test_cases_info.add(
+ self.current_test_case_info)
+ else:
+ self.result_string = '%s [no temp dir]' % error_type_str
+
+ self.send_result_through_pipe(test, error_type)
def addFailure(self, test, err):
"""
:param err: error message
"""
- if hasattr(test, 'logger'):
- test.logger.debug("--- addFailure() %s.%s(%s) called, err is %s"
- % (test.__class__.__name__,
- test._testMethodName,
- test._testMethodDoc, err))
- test.logger.debug("formatted exception is:\n%s" %
- "".join(format_exception(*err)))
- unittest.TestResult.addFailure(self, test, err)
- if hasattr(test, 'tempdir'):
- self.result_string = colorize("FAIL", RED) + \
- ' [ temp dir used by test case: ' + test.tempdir + ' ]'
- self.symlink_failed(test)
- else:
- self.result_string = colorize("FAIL", RED) + ' [no temp dir]'
-
- self.send_failure_through_pipe(test)
+ self.add_error(test, err, unittest.TestResult.addFailure, FAIL)
def addError(self, test, err):
"""
:param err: error message
"""
- if hasattr(test, 'logger'):
- test.logger.debug("--- addError() %s.%s(%s) called, err is %s"
- % (test.__class__.__name__,
- test._testMethodName,
- test._testMethodDoc, err))
- test.logger.debug("formatted exception is:\n%s" %
- "".join(format_exception(*err)))
- unittest.TestResult.addError(self, test, err)
- if hasattr(test, 'tempdir'):
- self.result_string = colorize("ERROR", RED) + \
- ' [ temp dir used by test case: ' + test.tempdir + ' ]'
- self.symlink_failed(test)
- else:
- self.result_string = colorize("ERROR", RED) + ' [no temp dir]'
-
- self.send_failure_through_pipe(test)
+ self.add_error(test, err, unittest.TestResult.addError, ERROR)
def getDescription(self, test):
"""
:returns: test description
"""
- # TODO: if none print warning not raise exception
- short_description = test.shortDescription()
- if self.descriptions and short_description:
- return short_description
- else:
- return str(test)
+ return get_test_description(self.descriptions, test)
def startTest(self, test):
"""
:param test:
"""
- self.printer.print_test_case_heading_if_first_time(test)
+
+        def print_header(test):
+            # check the class's OWN dict: hasattr() also sees the flag
+            # inherited from a base test class, which would wrongly
+            # suppress the heading for every derived test case
+            # NOTE(review): getdoc() returns None for an undocumented
+            # class - splitlines() would raise; confirm all test classes
+            # carry a docstring
+            if '_header_printed' not in test.__class__.__dict__:
+                print(double_line_delim)
+                print(colorize(getdoc(test).splitlines()[0], GREEN))
+                print(double_line_delim)
+                test.__class__._header_printed = True
+
+ print_header(test)
+
unittest.TestResult.startTest(self, test)
if self.verbosity > 0:
self.stream.writeln(
def stopTest(self, test):
"""
- Stop a test
+ Called when the given test has been run
:param test:
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
+ self.send_result_through_pipe(test, TEST_RUN)
+
def printErrors(self):
"""
Print errors from running the test case
"""
- self.stream.writeln()
- self.printErrorList('ERROR', self.errors)
- self.printErrorList('FAIL', self.failures)
+ if len(self.errors) > 0 or len(self.failures) > 0:
+ self.stream.writeln()
+ self.printErrorList('ERROR', self.errors)
+ self.printErrorList('FAIL', self.failures)
+
+ # ^^ that is the last output from unittest before summary
+        if not self.runner.print_summary:
+            # NOTE(review): this devnull handle is never closed - run()
+            # restores orig_stream but leaks the open file; consider
+            # closing it when the stream is swapped back
+            devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
+            self.stream = devnull
+            self.runner.stream = devnull
def printErrorList(self, flavour, errors):
"""
self.stream.writeln("%s" % err)
-class Filter_by_test_option:
- def __init__(self, filter_file_name, filter_class_name, filter_func_name):
- self.filter_file_name = filter_file_name
- self.filter_class_name = filter_class_name
- self.filter_func_name = filter_func_name
-
- def __call__(self, file_name, class_name, func_name):
- if self.filter_file_name and file_name != self.filter_file_name:
- return False
- if self.filter_class_name and class_name != self.filter_class_name:
- return False
- if self.filter_func_name and func_name != self.filter_func_name:
- return False
- return True
-
-
class VppTestRunner(unittest.TextTestRunner):
"""
A basic test runner implementation which prints results to standard error.
"""
+
@property
def resultclass(self):
"""Class maintaining the results of the tests"""
return VppTestResult
- def __init__(self, keep_alive_pipe=None, failed_pipe=None,
- stream=sys.stderr, descriptions=True,
- verbosity=1, failfast=False, buffer=False, resultclass=None):
+ def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
+ result_pipe=None, failfast=False, buffer=False,
+ resultclass=None, print_summary=True, **kwargs):
# ignore stream setting here, use hard-coded stdout to be in sync
# with prints from VppTestCase methods ...
super(VppTestRunner, self).__init__(sys.stdout, descriptions,
verbosity, failfast, buffer,
- resultclass)
- reporter = KeepAliveReporter()
- reporter.pipe = keep_alive_pipe
- # this is super-ugly, but very simple to implement and works as long
- # as we run only one test at the same time
- VppTestResult.test_framework_failed_pipe = failed_pipe
-
- test_option = "TEST"
-
- def parse_test_option(self):
- f = os.getenv(self.test_option, None)
- filter_file_name = None
- filter_class_name = None
- filter_func_name = None
- if f:
- if '.' in f:
- parts = f.split('.')
- if len(parts) > 3:
- raise Exception("Unrecognized %s option: %s" %
- (self.test_option, f))
- if len(parts) > 2:
- if parts[2] not in ('*', ''):
- filter_func_name = parts[2]
- if parts[1] not in ('*', ''):
- filter_class_name = parts[1]
- if parts[0] not in ('*', ''):
- if parts[0].startswith('test_'):
- filter_file_name = parts[0]
- else:
- filter_file_name = 'test_%s' % parts[0]
- else:
- if f.startswith('test_'):
- filter_file_name = f
- else:
- filter_file_name = 'test_%s' % f
- return filter_file_name, filter_class_name, filter_func_name
+ resultclass, **kwargs)
+ KeepAliveReporter.pipe = keep_alive_pipe
- @staticmethod
- def filter_tests(tests, filter_cb):
- result = unittest.suite.TestSuite()
- for t in tests:
- if isinstance(t, unittest.suite.TestSuite):
- # this is a bunch of tests, recursively filter...
- x = VppTestRunner.filter_tests(t, filter_cb)
- if x.countTestCases() > 0:
- result.addTest(x)
- elif isinstance(t, unittest.TestCase):
- # this is a single test
- parts = t.id().split('.')
- # t.id() for common cases like this:
- # test_classifier.TestClassifier.test_acl_ip
- # apply filtering only if it is so
- if len(parts) == 3:
- if not filter_cb(parts[0], parts[1], parts[2]):
- continue
- result.addTest(t)
- else:
- # unexpected object, don't touch it
- result.addTest(t)
- return result
+ self.orig_stream = self.stream
+ self.resultclass.test_framework_result_pipe = result_pipe
+
+ self.print_summary = print_summary
+
+ def _makeResult(self):
+ return self.resultclass(self.stream,
+ self.descriptions,
+ self.verbosity,
+ self)
def run(self, test):
"""
"""
faulthandler.enable() # emit stack trace to stderr if killed by signal
- print("Running tests using custom test runner") # debug message
- filter_file, filter_class, filter_func = self.parse_test_option()
- print("Active filters: file=%s, class=%s, function=%s" % (
- filter_file, filter_class, filter_func))
- filter_cb = Filter_by_test_option(
- filter_file, filter_class, filter_func)
- filtered = self.filter_tests(test, filter_cb)
- print("%s out of %s tests match specified filters" % (
- filtered.countTestCases(), test.countTestCases()))
- if not running_extended_tests():
- print("Not running extended tests (some tests will be skipped)")
- return super(VppTestRunner, self).run(filtered)
+
+ result = super(VppTestRunner, self).run(test)
+ if not self.print_summary:
+ self.stream = self.orig_stream
+ result.stream = self.orig_stream
+ return result
class Worker(Thread):
- def __init__(self, args, logger, env={}):
+ def __init__(self, args, logger, env=None):
self.logger = logger
self.args = args
+ if hasattr(self, 'testcase') and self.testcase.debug_all:
+ if self.testcase.debug_gdbserver:
+ self.args = ['/usr/bin/gdbserver', 'localhost:{port}'
+ .format(port=self.testcase.gdbserver_port)] + args
+ elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'):
+                # build a new list: appending to self.args would mutate the
+                # caller-supplied `args` list through aliasing (the
+                # gdbserver branch above correctly creates a fresh list)
+                self.args = self.args + [self.wait_for_gdb]
+ self.app_bin = args[0]
+ self.app_name = os.path.basename(self.app_bin)
+ if hasattr(self, 'role'):
+ self.app_name += ' {role}'.format(role=self.role)
+ self.process = None
self.result = None
+ env = {} if env is None else env
self.env = copy.deepcopy(env)
super(Worker, self).__init__()
+ def wait_for_enter(self):
+ if not hasattr(self, 'testcase'):
+ return
+ if self.testcase.debug_all and self.testcase.debug_gdbserver:
+ print()
+ print(double_line_delim)
+ print("Spawned GDB Server for '{app}' with PID: {pid}"
+ .format(app=self.app_name, pid=self.process.pid))
+ elif self.testcase.debug_all and self.testcase.debug_gdb:
+ print()
+ print(double_line_delim)
+ print("Spawned '{app}' with PID: {pid}"
+ .format(app=self.app_name, pid=self.process.pid))
+ else:
+ return
+ print(single_line_delim)
+ print("You can debug '{app}' using:".format(app=self.app_name))
+ if self.testcase.debug_gdbserver:
+ print("sudo gdb " + self.app_bin +
+ " -ex 'target remote localhost:{port}'"
+ .format(port=self.testcase.gdbserver_port))
+ print("Now is the time to attach gdb by running the above "
+ "command, set up breakpoints etc., then resume from "
+ "within gdb by issuing the 'continue' command")
+ self.testcase.gdbserver_port += 1
+ elif self.testcase.debug_gdb:
+ print("sudo gdb " + self.app_bin +
+ " -ex 'attach {pid}'".format(pid=self.process.pid))
+ print("Now is the time to attach gdb by running the above "
+ "command and set up breakpoints etc., then resume from"
+ " within gdb by issuing the 'continue' command")
+ print(single_line_delim)
+ input("Press ENTER to continue running the testcase...")
+
def run(self):
executable = self.args[0]
- self.logger.debug("Running executable w/args `%s'" % self.args)
+ if not os.path.exists(executable) or not os.access(
+ executable, os.F_OK | os.X_OK):
+ # Exit code that means some system file did not exist,
+ # could not be opened, or had some other kind of error.
+ self.result = os.EX_OSFILE
+ raise EnvironmentError(
+ "executable '%s' is not found or executable." % executable)
+ self.logger.debug("Running executable: '{app}'"
+ .format(app=' '.join(self.args)))
env = os.environ.copy()
env.update(self.env)
env["CK_LOG_FILE_NAME"] = "-"
self.process = subprocess.Popen(
self.args, shell=False, env=env, preexec_fn=os.setpgrp,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ self.wait_for_enter()
out, err = self.process.communicate()
- self.logger.debug("Finished running `%s'" % executable)
+ self.logger.debug("Finished running `{app}'".format(app=self.app_name))
self.logger.info("Return code is `%s'" % self.process.returncode)
self.logger.info(single_line_delim)
- self.logger.info("Executable `%s' wrote to stdout:" % executable)
+ self.logger.info("Executable `{app}' wrote to stdout:"
+ .format(app=self.app_name))
self.logger.info(single_line_delim)
- self.logger.info(out)
+ self.logger.info(out.decode('utf-8'))
self.logger.info(single_line_delim)
- self.logger.info("Executable `%s' wrote to stderr:" % executable)
+ self.logger.info("Executable `{app}' wrote to stderr:"
+ .format(app=self.app_name))
self.logger.info(single_line_delim)
- self.logger.info(err)
+ self.logger.info(err.decode('utf-8'))
self.logger.info(single_line_delim)
self.result = self.process.returncode
+
+
+if __name__ == '__main__':
+ pass