3 from __future__ import print_function
19 from collections import deque
20 from threading import Thread, Event
21 from inspect import getdoc, isclass
22 from traceback import format_exception
23 from logging import FileHandler, DEBUG, Formatter
25 from abc import ABC, abstractmethod
28 from scapy.packet import Raw
29 import hook as hookmodule
30 from vpp_pg_interface import VppPGInterface
31 from vpp_sub_interface import VppSubInterface
32 from vpp_lo_interface import VppLoInterface
33 from vpp_bvi_interface import VppBviInterface
34 from vpp_papi_provider import VppPapiProvider
36 from vpp_papi.vpp_stats import VPPStats
37 from vpp_papi.vpp_transport_socket import VppTransportSocketIOError
38 from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
40 from vpp_object import VppObjectRegistry
41 from util import ppp, is_core_present
42 from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
43 from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
44 from scapy.layers.inet6 import ICMPv6EchoReply
46 from cpu_config import available_cpus, num_cpus, max_vpp_cpus
48 logger = logging.getLogger(__name__)
50 # Set up an empty logger for the testcase that can be overridden as necessary
51 null_logger = logging.getLogger('VppTestCase')
52 null_logger.addHandler(logging.NullHandler())
# Truthy wrapper around an environment variable: an instance evaluates to
# True when the variable's lowercased value is one of the accepted "true"
# strings. NOTE(review): several interior lines are elided in this view
# (the default true-values list, the __bool__/__repr__ def lines) - the
# comments below hedge accordingly.
62 class BoolEnvironmentVariable(object):
# env_var_name: variable to query; default: value assumed when unset ('n');
# true_values: iterable of accepted truthy strings (a default collection is
# presumably supplied on the elided continuation of the line below).
64 def __init__(self, env_var_name, default='n', true_values=None):
65 self.name = env_var_name
66 self.default = default
67 self.true_values = true_values if true_values is not None else \
# Truthiness: read the variable (falling back to default), lowercase it and
# test membership in true_values. This looks like the body of __bool__,
# whose def line is elided here - TODO confirm.
71 return os.getenv(self.name, self.default).lower() in self.true_values
# Python 2 spells the truthiness hook __nonzero__; alias it for compat.
73 if sys.version_info[0] == 2:
74 __nonzero__ = __bool__
# Debug-friendly repr that shows the full configuration.
77 return 'BoolEnvironmentVariable(%r, default=%r, true_values=%r)' % \
78 (self.name, self.default, self.true_values)
81 debug_framework = BoolEnvironmentVariable('TEST_DEBUG')
86 Test framework module.
88 The module provides a set of tools for constructing and running tests and
89 representing the results.
93 class VppDiedError(Exception):
94 """ exception for reporting that the subprocess has died."""
# Reverse map: signal number -> signal name, built from the signal module,
# keeping real signals ('SIG*') and dropping handler constants ('SIG_*').
96 signals_by_value = {v: k for k, v in signal.__dict__.items() if
97 k.startswith('SIG') and not k.startswith('SIG_')}
# rv: process return code (negative values conventionally mean "killed by
# signal"); testcase/method_name: optional context for the message.
99 def __init__(self, rv=None, testcase=None, method_name=None):
101 self.signal_name = None
102 self.testcase = testcase
103 self.method_name = method_name
# -rv maps a killed-by-signal return code back to the signal number;
# KeyError/TypeError (unknown signal or rv=None) leaves signal_name None.
# NOTE(review): the try line and the except body are elided in this view.
106 self.signal_name = VppDiedError.signals_by_value[-rv]
107 except (KeyError, TypeError):
# Only mention the test context when we actually have one.
110 if testcase is None and method_name is None:
113 in_msg = ' while running %s.%s' % (testcase, method_name)
# With a return code available, include it plus the decoded signal name.
116 msg = "VPP subprocess died unexpectedly%s with return code: %d%s."\
117 % (in_msg, self.rv, ' [%s]' %
119 self.signal_name is not None else ''))
# Without a return code (elided else branch), emit the short form.
121 msg = "VPP subprocess died unexpectedly%s." % in_msg
123 super(VppDiedError, self).__init__(msg)
126 class _PacketInfo(object):
127 """Private class to create packet info object.
129 Help process information about the next packet.
130 Set variables to default values.
132 #: Store the index of the packet.
134 #: Store the index of the source packet generator interface of the packet.
136 #: Store the index of the destination packet generator interface
139 #: Store expected ip version
141 #: Store expected upper protocol
143 #: Store the copy of the former packet.
def __eq__(self, other):
    """Packet infos match when index, src, dst and data are all equal."""
    return (self.index == other.index
            and self.src == other.src
            and self.dst == other.dst
            and self.data == other.data)
# Thread body started by setUpClass: drains the VPP child's stdout/stderr
# pipes into per-class deques (and optionally the logger) until the stop
# flag is set. NOTE(review): the fragment-variable initialisation, the
# select() tail arguments and the "limit" computation lines are elided in
# this view - comments hedge accordingly.
154 def pump_output(testclass):
155 """ pump output from vpp stdout/stderr to proper queues """
158 while not testclass.pump_thread_stop_flag.is_set():
# Block until vpp produces output or the wakeup pipe is written (the pipe
# exists solely so quit() can unblock this select).
159 readable = select.select([testclass.vpp.stdout.fileno(),
160 testclass.vpp.stderr.fileno(),
161 testclass.pump_thread_wakeup_pipe[0]],
163 if testclass.vpp.stdout.fileno() in readable:
164 read = os.read(testclass.vpp.stdout.fileno(), 102400)
# Decode defensively: backslashreplace keeps un-decodable bytes visible.
# splitlines(True) keeps line endings so partial lines can be detected.
166 split = read.decode('ascii',
167 errors='backslashreplace').splitlines(True)
# Prepend the partial line left over from the previous read, if any.
168 if len(stdout_fragment) > 0:
169 split[0] = "%s%s" % (stdout_fragment, split[0])
# A last element without "\n" is an incomplete line - presumably carried
# over into stdout_fragment via the elided limit logic; TODO confirm.
170 if len(split) > 0 and split[-1].endswith("\n"):
174 stdout_fragment = split[-1]
175 testclass.vpp_stdout_deque.extend(split[:limit])
# When output caching is off, forward complete lines to the logger now.
176 if not testclass.cache_vpp_output:
177 for line in split[:limit]:
178 testclass.logger.info(
179 "VPP STDOUT: %s" % line.rstrip("\n"))
# Mirror image of the stdout branch, for stderr.
180 if testclass.vpp.stderr.fileno() in readable:
181 read = os.read(testclass.vpp.stderr.fileno(), 102400)
183 split = read.decode('ascii',
184 errors='backslashreplace').splitlines(True)
185 if len(stderr_fragment) > 0:
186 split[0] = "%s%s" % (stderr_fragment, split[0])
187 if len(split) > 0 and split[-1].endswith("\n"):
191 stderr_fragment = split[-1]
193 testclass.vpp_stderr_deque.extend(split[:limit])
194 if not testclass.cache_vpp_output:
195 for line in split[:limit]:
196 testclass.logger.error(
197 "VPP STDERR: %s" % line.rstrip("\n"))
198 # ignoring the dummy pipe here intentionally - the
199 # flag will take care of properly terminating the loop
def _is_skip_aarch64_set():
    """Expose the SKIP_AARCH64 environment toggle as a truthy object."""
    env_name = 'SKIP_AARCH64'
    return BoolEnvironmentVariable(env_name)
206 is_skip_aarch64_set = _is_skip_aarch64_set()
def _is_platform_aarch64():
    """Return True when the test host's CPU architecture is aarch64."""
    machine = platform.machine()
    return machine == 'aarch64'
213 is_platform_aarch64 = _is_platform_aarch64()
def _running_extended_tests():
    """Expose the EXTENDED_TESTS environment toggle as a truthy object."""
    env_name = "EXTENDED_TESTS"
    return BoolEnvironmentVariable(env_name)
220 running_extended_tests = _running_extended_tests()
def _running_gcov_tests():
    """Expose the GCOV_TESTS environment toggle as a truthy object."""
    env_name = "GCOV_TESTS"
    return BoolEnvironmentVariable(env_name)
227 running_gcov_tests = _running_gcov_tests()
230 def get_environ_vpp_worker_count():
231 worker_config = os.getenv("VPP_WORKER_CONFIG", None)
233 elems = worker_config.split(" ")
234 if elems[0] != "workers" or len(elems) != 2:
235 raise ValueError("Wrong VPP_WORKER_CONFIG == '%s' value." %
242 environ_vpp_worker_count = get_environ_vpp_worker_count()
245 class KeepAliveReporter(object):
247 Singleton object which reports test start to parent process
252 self.__dict__ = self._shared_state
260 def pipe(self, pipe):
261 if self._pipe is not None:
262 raise Exception("Internal error - pipe should only be set once.")
265 def send_keep_alive(self, test, desc=None):
267 Write current test tmpdir & desc to keep-alive pipe to signal liveness
269 if self.pipe is None:
270 # if not running forked..
274 desc = '%s (%s)' % (desc, unittest.util.strclass(test))
278 self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
# Enumeration of scheduling/workaround tags a test-case class may carry.
# NOTE(review): the RUN_SOLO = 1 member line is elided in this view.
281 class TestCaseTag(Enum):
282 # marks the suites that must run at the end
283 # using only a single test runner
285 # marks the suites broken on VPP multi-worker
286 FIXME_VPP_WORKERS = 2
# Build a class decorator that records tag `e` on the decorated test-case
# class. NOTE(review): the decorator's def line and the branch creating
# test_tags when absent (the AttributeError body) are elided here.
289 def create_tag_decorator(e):
292 cls.test_tags.append(e)
293 except AttributeError:
# Ready-made decorators for the two tags above.
299 tag_run_solo = create_tag_decorator(TestCaseTag.RUN_SOLO)
300 tag_fixme_vpp_workers = create_tag_decorator(TestCaseTag.FIXME_VPP_WORKERS)
314 class CPUInterface(ABC):
316 skipped_due_to_cpu_lack = False
320 def get_cpus_required(cls):
324 def assign_cpus(cls, cpus):
328 class VppTestCase(CPUInterface, unittest.TestCase):
329 """This subclass is a base class for VPP test cases that are implemented as
330 classes. It provides methods to create and run test case.
333 extra_vpp_statseg_config = ""
334 extra_vpp_punt_config = []
335 extra_vpp_plugin_config = []
337 vapi_response_timeout = 5
def packet_infos(self):
    """Packet info objects recorded so far for this test case."""
    recorded = self._packet_infos
    return recorded
345 def get_packet_count_for_if_idx(cls, dst_if_index):
346 """Get the number of packet info for specified destination if index"""
347 if dst_if_index in cls._packet_count_for_dst_if_idx:
348 return cls._packet_count_for_dst_if_idx[dst_if_index]
353 def has_tag(cls, tag):
354 """ if the test case has a given tag - return true """
356 return tag in cls.test_tags
357 except AttributeError:
def is_tagged_run_solo(cls):
    """ True when this suite is timing-sensitive and must run alone. """
    solo = TestCaseTag.RUN_SOLO
    return cls.has_tag(solo)
368 """Return the instance of this testcase"""
369 return cls.test_instance
372 def set_debug_flags(cls, d):
373 cls.gdbserver_port = 7777
374 cls.debug_core = False
375 cls.debug_gdb = False
376 cls.debug_gdbserver = False
377 cls.debug_all = False
378 cls.debug_attach = False
383 cls.debug_core = True
384 elif dl == "gdb" or dl == "gdb-all":
386 elif dl == "gdbserver" or dl == "gdbserver-all":
387 cls.debug_gdbserver = True
389 cls.debug_attach = True
391 raise Exception("Unrecognized DEBUG option: '%s'" % d)
392 if dl == "gdb-all" or dl == "gdbserver-all":
396 def get_vpp_worker_count(cls):
397 if not hasattr(cls, "vpp_worker_count"):
398 if cls.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
399 cls.vpp_worker_count = 0
401 cls.vpp_worker_count = environ_vpp_worker_count
402 return cls.vpp_worker_count
def get_cpus_required(cls):
    """ CPUs this suite needs: one main core plus one per VPP worker. """
    workers = cls.get_vpp_worker_count()
    return workers + 1
409 def setUpConstants(cls):
410 """ Set-up the test case class based on environment variables """
411 cls.step = BoolEnvironmentVariable('STEP')
412 # inverted case to handle '' == True
413 c = os.getenv("CACHE_OUTPUT", "1")
414 cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
415 cls.vpp_bin = os.getenv('VPP_BIN', "vpp")
416 cls.plugin_path = os.getenv('VPP_PLUGIN_PATH')
417 cls.test_plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
418 cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
420 if cls.plugin_path is not None:
421 if cls.extern_plugin_path is not None:
422 plugin_path = "%s:%s" % (
423 cls.plugin_path, cls.extern_plugin_path)
425 plugin_path = cls.plugin_path
426 elif cls.extern_plugin_path is not None:
427 plugin_path = cls.extern_plugin_path
429 if cls.step or cls.debug_gdb or cls.debug_gdbserver:
430 debug_cli = "cli-listen localhost:5002"
432 size = os.getenv("COREDUMP_SIZE")
434 coredump_size = "coredump-size %s" % size
435 if coredump_size is None:
436 coredump_size = "coredump-size unlimited"
438 default_variant = os.getenv("VARIANT")
439 if default_variant is not None:
440 default_variant = "defaults { %s 100 }" % default_variant
444 api_fuzzing = os.getenv("API_FUZZ")
445 if api_fuzzing is None:
450 "unix", "{", "nodaemon", debug_cli, "full-coredump",
451 coredump_size, "runtime-dir", cls.tempdir, "}",
452 "api-trace", "{", "on", "}",
453 "api-segment", "{", "prefix", cls.get_api_segment_prefix(), "}",
454 "cpu", "{", "main-core", str(cls.cpus[0]), ]
455 if cls.get_vpp_worker_count():
456 cls.vpp_cmdline.extend([
457 "corelist-workers", ",".join([str(x) for x in cls.cpus[1:]])])
458 cls.vpp_cmdline.extend([
460 "physmem", "{", "max-size", "32m", "}",
461 "statseg", "{", "socket-name", cls.get_stats_sock_path(),
462 cls.extra_vpp_statseg_config, "}",
463 "socksvr", "{", "socket-name", cls.get_api_sock_path(), "}",
464 "node { ", default_variant, "}",
465 "api-fuzz {", api_fuzzing, "}",
466 "plugins", "{", "plugin", "dpdk_plugin.so", "{", "disable", "}",
467 "plugin", "rdma_plugin.so", "{", "disable", "}",
468 "plugin", "lisp_unittest_plugin.so", "{", "enable", "}",
469 "plugin", "unittest_plugin.so", "{", "enable", "}"
470 ] + cls.extra_vpp_plugin_config + ["}", ])
472 if cls.extra_vpp_punt_config is not None:
473 cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
474 if plugin_path is not None:
475 cls.vpp_cmdline.extend(["plugin_path", plugin_path])
476 if cls.test_plugin_path is not None:
477 cls.vpp_cmdline.extend(["test_plugin_path", cls.test_plugin_path])
479 if not cls.debug_attach:
480 cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
481 cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
484 def wait_for_enter(cls):
485 if cls.debug_gdbserver:
486 print(double_line_delim)
487 print("Spawned GDB server with PID: %d" % cls.vpp.pid)
489 print(double_line_delim)
490 print("Spawned VPP with PID: %d" % cls.vpp.pid)
492 cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
494 print(single_line_delim)
495 print("You can debug VPP using:")
496 if cls.debug_gdbserver:
497 print("sudo gdb " + cls.vpp_bin +
498 " -ex 'target remote localhost:{port}'"
499 .format(port=cls.gdbserver_port))
500 print("Now is the time to attach gdb by running the above "
501 "command, set up breakpoints etc., then resume VPP from "
502 "within gdb by issuing the 'continue' command")
503 cls.gdbserver_port += 1
505 print("sudo gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
506 print("Now is the time to attach gdb by running the above "
507 "command and set up breakpoints etc., then resume VPP from"
508 " within gdb by issuing the 'continue' command")
509 print(single_line_delim)
510 input("Press ENTER to continue running the testcase...")
518 cls.logger.debug(f"Assigned cpus: {cls.cpus}")
519 cmdline = cls.vpp_cmdline
521 if cls.debug_gdbserver:
522 gdbserver = '/usr/bin/gdbserver'
523 if not os.path.isfile(gdbserver) or\
524 not os.access(gdbserver, os.X_OK):
525 raise Exception("gdbserver binary '%s' does not exist or is "
526 "not executable" % gdbserver)
528 cmdline = [gdbserver, 'localhost:{port}'
529 .format(port=cls.gdbserver_port)] + cls.vpp_cmdline
530 cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
533 cls.vpp = subprocess.Popen(cmdline,
534 stdout=subprocess.PIPE,
535 stderr=subprocess.PIPE)
536 except subprocess.CalledProcessError as e:
537 cls.logger.critical("Subprocess returned with non-0 return code: ("
541 cls.logger.critical("Subprocess returned with OS error: "
542 "(%s) %s", e.errno, e.strerror)
544 except Exception as e:
545 cls.logger.exception("Subprocess returned unexpected from "
# Before killing a crashed VPP, give the kernel time to finish writing the
# core file: poll its size until it stops growing or a deadline passes.
# NOTE(review): the sleep inside the loop, the break, and the tails of the
# log statements are elided in this view.
552 def wait_for_coredump(cls):
553 corefile = cls.tempdir + "/core"
554 if os.path.isfile(corefile):
555 cls.logger.error("Waiting for coredump to complete: %s", corefile)
556 curr_size = os.path.getsize(corefile)
# Allow up to 60 seconds for the dump to finish.
557 deadline = time.time() + 60
559 while time.time() < deadline:
562 curr_size = os.path.getsize(corefile)
# Size unchanged between two polls - presumably treated as "dump done";
# the surrounding sleep/break lines are elided, so TODO confirm.
563 if size == curr_size:
567 cls.logger.error("Timed out waiting for coredump to complete:"
570 cls.logger.error("Coredump complete: %s, size %d",
def get_stats_sock_path(cls):
    """ Path of the VPP stats segment socket inside the temp dir. """
    return "{}/stats.sock".format(cls.tempdir)
def get_api_sock_path(cls):
    """ Path of the VPP binary-API socket inside the temp dir. """
    return "{}/api.sock".format(cls.tempdir)
def get_api_segment_prefix(cls):
    """ API segment prefix derived from the temp dir name (VAPI only). """
    prefix = os.path.basename(cls.tempdir)
    return prefix
586 def get_tempdir(cls):
588 return os.getenv("VPP_IN_GDB_TMP_DIR",
589 "/tmp/unittest-attach-gdb")
591 return tempfile.mkdtemp(prefix='vpp-unittest-%s-' % cls.__name__)
596 Perform class setup before running the testcase
597 Remove shared memory files, start vpp and connect the vpp-api
599 super(VppTestCase, cls).setUpClass()
600 cls.logger = get_logger(cls.__name__)
601 seed = os.environ["RND_SEED"]
603 if hasattr(cls, 'parallel_handler'):
604 cls.logger.addHandler(cls.parallel_handler)
605 cls.logger.propagate = False
606 d = os.getenv("DEBUG", None)
607 cls.set_debug_flags(d)
608 cls.tempdir = cls.get_tempdir()
609 cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
610 cls.file_handler.setFormatter(
611 Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
613 cls.file_handler.setLevel(DEBUG)
614 cls.logger.addHandler(cls.file_handler)
615 cls.logger.debug("--- setUpClass() for %s called ---" % cls.__name__)
616 os.chdir(cls.tempdir)
617 cls.logger.info("Temporary dir is %s, api socket is %s",
618 cls.tempdir, cls.get_api_sock_path())
619 cls.logger.debug("Random seed is %s", seed)
621 cls.reset_packet_infos()
626 cls.registry = VppObjectRegistry()
627 cls.vpp_startup_failed = False
628 cls.reporter = KeepAliveReporter()
629 # need to catch exceptions here because if we raise, then the cleanup
630 # doesn't get called and we might end with a zombie vpp
636 cls.reporter.send_keep_alive(cls, 'setUpClass')
637 VppTestResult.current_test_case_info = TestCaseInfo(
638 cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
639 cls.vpp_stdout_deque = deque()
640 cls.vpp_stderr_deque = deque()
641 if not cls.debug_attach:
642 cls.pump_thread_stop_flag = Event()
643 cls.pump_thread_wakeup_pipe = os.pipe()
644 cls.pump_thread = Thread(target=pump_output, args=(cls,))
645 cls.pump_thread.daemon = True
646 cls.pump_thread.start()
647 if cls.debug_gdb or cls.debug_gdbserver or cls.debug_attach:
648 cls.vapi_response_timeout = 0
649 cls.vapi = VppPapiProvider(cls.__name__, cls,
650 cls.vapi_response_timeout)
652 hook = hookmodule.StepHook(cls)
654 hook = hookmodule.PollHook(cls)
655 cls.vapi.register_hook(hook)
656 cls.statistics = VPPStats(socketname=cls.get_stats_sock_path())
660 cls.vpp_startup_failed = True
662 "VPP died shortly after startup, check the"
663 " output to standard error for possible cause")
667 except (vpp_papi.VPPIOError, Exception) as e:
668 cls.logger.debug("Exception connecting to vapi: %s" % e)
669 cls.vapi.disconnect()
671 if cls.debug_gdbserver:
672 print(colorize("You're running VPP inside gdbserver but "
673 "VPP-API connection failed, did you forget "
674 "to 'continue' VPP from within gdb?", RED))
677 last_line = cls.vapi.cli("show thread").split("\n")[-2]
678 cls.vpp_worker_count = int(last_line.split(" ")[0])
679 print("Detected VPP with %s workers." % cls.vpp_worker_count)
680 except vpp_papi.VPPRuntimeError as e:
681 cls.logger.debug("%s" % e)
684 except Exception as e:
685 cls.logger.debug("Exception connecting to VPP: %s" % e)
690 def _debug_quit(cls):
691 if (cls.debug_gdbserver or cls.debug_gdb):
695 if cls.vpp.returncode is None:
697 print(double_line_delim)
698 print("VPP or GDB server is still running")
699 print(single_line_delim)
700 input("When done debugging, press ENTER to kill the "
701 "process and finish running the testcase...")
702 except AttributeError:
708 Disconnect vpp-api, kill vpp and cleanup shared memory files
712 # first signal that we want to stop the pump thread, then wake it up
713 if hasattr(cls, 'pump_thread_stop_flag'):
714 cls.pump_thread_stop_flag.set()
715 if hasattr(cls, 'pump_thread_wakeup_pipe'):
716 os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up')
717 if hasattr(cls, 'pump_thread'):
718 cls.logger.debug("Waiting for pump thread to stop")
719 cls.pump_thread.join()
720 if hasattr(cls, 'vpp_stderr_reader_thread'):
721 cls.logger.debug("Waiting for stderr pump to stop")
722 cls.vpp_stderr_reader_thread.join()
724 if hasattr(cls, 'vpp'):
725 if hasattr(cls, 'vapi'):
726 cls.logger.debug(cls.vapi.vpp.get_stats())
727 cls.logger.debug("Disconnecting class vapi client on %s",
729 cls.vapi.disconnect()
730 cls.logger.debug("Deleting class vapi attribute on %s",
734 if not cls.debug_attach and cls.vpp.returncode is None:
735 cls.wait_for_coredump()
736 cls.logger.debug("Sending TERM to vpp")
738 cls.logger.debug("Waiting for vpp to die")
740 outs, errs = cls.vpp.communicate(timeout=5)
741 except subprocess.TimeoutExpired:
743 outs, errs = cls.vpp.communicate()
744 cls.logger.debug("Deleting class vpp attribute on %s",
746 if not cls.debug_attach:
747 cls.vpp.stdout.close()
748 cls.vpp.stderr.close()
751 if cls.vpp_startup_failed:
752 stdout_log = cls.logger.info
753 stderr_log = cls.logger.critical
755 stdout_log = cls.logger.info
756 stderr_log = cls.logger.info
758 if hasattr(cls, 'vpp_stdout_deque'):
759 stdout_log(single_line_delim)
760 stdout_log('VPP output to stdout while running %s:', cls.__name__)
761 stdout_log(single_line_delim)
762 vpp_output = "".join(cls.vpp_stdout_deque)
763 with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
765 stdout_log('\n%s', vpp_output)
766 stdout_log(single_line_delim)
768 if hasattr(cls, 'vpp_stderr_deque'):
769 stderr_log(single_line_delim)
770 stderr_log('VPP output to stderr while running %s:', cls.__name__)
771 stderr_log(single_line_delim)
772 vpp_output = "".join(cls.vpp_stderr_deque)
773 with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
775 stderr_log('\n%s', vpp_output)
776 stderr_log(single_line_delim)
779 def tearDownClass(cls):
780 """ Perform final cleanup after running all tests in this test-case """
781 cls.logger.debug("--- tearDownClass() for %s called ---" %
783 cls.reporter.send_keep_alive(cls, 'tearDownClass')
785 cls.file_handler.close()
786 cls.reset_packet_infos()
788 debug_internal.on_tear_down_class(cls)
def show_commands_at_teardown(self):
    """ Hook for subclasses to log extra show-command output at teardown. """
    message = "--- No test specific show commands provided. ---"
    self.logger.info(message)
795 """ Show various debug prints after each test """
796 self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
797 (self.__class__.__name__, self._testMethodName,
798 self._testMethodDoc))
801 if not self.vpp_dead:
802 self.logger.debug(self.vapi.cli("show trace max 1000"))
803 self.logger.info(self.vapi.ppcli("show interface"))
804 self.logger.info(self.vapi.ppcli("show hardware"))
805 self.logger.info(self.statistics.set_errors_str())
806 self.logger.info(self.vapi.ppcli("show run"))
807 self.logger.info(self.vapi.ppcli("show log"))
808 self.logger.info(self.vapi.ppcli("show bihash"))
809 self.logger.info("Logging testcase specific show commands.")
810 self.show_commands_at_teardown()
811 self.registry.remove_vpp_config(self.logger)
812 # Save/Dump VPP api trace log
813 m = self._testMethodName
814 api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
815 tmp_api_trace = "/tmp/%s" % api_trace
816 vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
817 self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
818 self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
820 os.rename(tmp_api_trace, vpp_api_trace_log)
821 self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
823 except VppTransportSocketIOError:
824 self.logger.debug("VppTransportSocketIOError: Vpp dead. "
825 "Cannot log show commands.")
828 self.registry.unregister_all(self.logger)
831 """ Clear trace before running each test"""
832 super(VppTestCase, self).setUp()
833 self.reporter.send_keep_alive(self)
835 raise VppDiedError(rv=None, testcase=self.__class__.__name__,
836 method_name=self._testMethodName)
837 self.sleep(.1, "during setUp")
838 self.vpp_stdout_deque.append(
839 "--- test setUp() for %s.%s(%s) starts here ---\n" %
840 (self.__class__.__name__, self._testMethodName,
841 self._testMethodDoc))
842 self.vpp_stderr_deque.append(
843 "--- test setUp() for %s.%s(%s) starts here ---\n" %
844 (self.__class__.__name__, self._testMethodName,
845 self._testMethodDoc))
846 self.vapi.cli("clear trace")
847 # store the test instance inside the test class - so that objects
848 # holding the class can access instance methods (like assertEqual)
849 type(self).test_instance = self
852 def pg_enable_capture(cls, interfaces=None):
854 Enable capture on packet-generator interfaces
856 :param interfaces: iterable interface indexes (if None,
857 use self.pg_interfaces)
860 if interfaces is None:
861 interfaces = cls.pg_interfaces
def register_pcap(cls, intf, worker):
    """ Remember an (interface, worker) capture so pg_start can manage it. """
    entry = (intf, worker)
    cls._pcaps.append(entry)
872 def get_vpp_time(cls):
873 # processes e.g. "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
874 # returns float("2.190522")
875 timestr = cls.vapi.cli('show clock')
876 head, sep, tail = timestr.partition(',')
877 head, sep, tail = head.partition('Time now')
881 def sleep_on_vpp_time(cls, sec):
882 """ Sleep according to time in VPP world """
883 # On a busy system with many processes
884 # we might end up with VPP time being slower than real world
885 # So take that into account when waiting for VPP to do something
886 start_time = cls.get_vpp_time()
887 while cls.get_vpp_time() - start_time < sec:
891 def pg_start(cls, trace=True):
892 """ Enable the PG, wait till it is done, then clean up """
893 for (intf, worker) in cls._old_pcaps:
894 intf.rename_old_pcap_file(intf.get_in_path(worker),
895 intf.in_history_counter)
898 cls.vapi.cli("clear trace")
899 cls.vapi.cli("trace add pg-input 1000")
900 cls.vapi.cli('packet-generator enable')
901 # PG, when starts, runs to completion -
902 # so let's avoid a race condition,
903 # and wait a little till it's done.
904 # Then clean it up - and then be gone.
905 deadline = time.time() + 300
906 while cls.vapi.cli('show packet-generator').find("Yes") != -1:
907 cls.sleep(0.01) # yield
908 if time.time() > deadline:
909 cls.logger.error("Timeout waiting for pg to stop")
911 for intf, worker in cls._pcaps:
912 cls.vapi.cli('packet-generator delete %s' %
913 intf.get_cap_name(worker))
914 cls._old_pcaps = cls._pcaps
918 def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
920 Create packet-generator interfaces.
922 :param interfaces: iterable indexes of the interfaces.
923 :returns: List of created interfaces.
928 intf = VppPGInterface(cls, i, gso, gso_size)
929 setattr(cls, intf.name, intf)
931 cls.pg_interfaces = result
935 def create_loopback_interfaces(cls, count):
937 Create loopback interfaces.
939 :param count: number of interfaces created.
940 :returns: List of created interfaces.
942 result = [VppLoInterface(cls) for i in range(count)]
944 setattr(cls, intf.name, intf)
945 cls.lo_interfaces = result
949 def create_bvi_interfaces(cls, count):
951 Create BVI interfaces.
953 :param count: number of interfaces created.
954 :returns: List of created interfaces.
956 result = [VppBviInterface(cls) for i in range(count)]
958 setattr(cls, intf.name, intf)
959 cls.bvi_interfaces = result
963 def extend_packet(packet, size, padding=' '):
965 Extend packet to given size by padding with spaces or custom padding
966 NOTE: Currently works only when Raw layer is present.
968 :param packet: packet
969 :param size: target size
970 :param padding: padding used to extend the payload
# +4 on top of the current length - presumably accounting for the 4-byte
# Ethernet FCS added on the wire; TODO confirm.
973 packet_len = len(packet) + 4
974 extend = size - packet_len
# Repeat the padding enough times to cover `extend` bytes, then trim to the
# exact length. NOTE(review): the guard around this (likely `if extend > 0`)
# is elided in this view.
976 num = (extend // len(padding)) + 1
977 packet[Raw].load += (padding * num)[:extend].encode("ascii")
def reset_packet_infos(cls):
    """ Drop all stored packet infos and per-interface packet counts. """
    cls._packet_infos = dict()
    cls._packet_count_for_dst_if_idx = dict()
986 def create_packet_info(cls, src_if, dst_if):
988 Create packet info object containing the source and destination indexes
989 and add it to the testcase's packet info list
991 :param VppInterface src_if: source interface
992 :param VppInterface dst_if: destination interface
994 :returns: _PacketInfo object
998 info.index = len(cls._packet_infos)
999 info.src = src_if.sw_if_index
1000 info.dst = dst_if.sw_if_index
1001 if isinstance(dst_if, VppSubInterface):
1002 dst_idx = dst_if.parent.sw_if_index
1004 dst_idx = dst_if.sw_if_index
1005 if dst_idx in cls._packet_count_for_dst_if_idx:
1006 cls._packet_count_for_dst_if_idx[dst_idx] += 1
1008 cls._packet_count_for_dst_if_idx[dst_idx] = 1
1009 cls._packet_infos[info.index] = info
def info_to_payload(info):
    """Serialize a _PacketInfo into its payload string form.

    :param info: _PacketInfo object
    :returns: space-separated "index src dst ip proto" string
    """
    fields = (info.index, info.src, info.dst, info.ip, info.proto)
    return "{:d} {:d} {:d} {:d} {:d}".format(*fields)
1025 def payload_to_info(payload, payload_field='load'):
1027 Convert packet payload to _PacketInfo object
1029 :param payload: packet payload
1030 :type payload: <class 'scapy.packet.Raw'>
1031 :param payload_field: packet fieldname of payload "load" for
1032 <class 'scapy.packet.Raw'>
1033 :type payload_field: str
1034 :returns: _PacketInfo object containing de-serialized data from payload
1037 numbers = getattr(payload, payload_field).split()
1038 info = _PacketInfo()
1039 info.index = int(numbers[0])
1040 info.src = int(numbers[1])
1041 info.dst = int(numbers[2])
1042 info.ip = int(numbers[3])
1043 info.proto = int(numbers[4])
1046 def get_next_packet_info(self, info):
1048 Iterate over the packet info list stored in the testcase
1049 Start iteration with first element if info is None
1050 Continue based on index in info if info is specified
1052 :param info: info or None
1053 :returns: next info in list or None if no more infos
1058 next_index = info.index + 1
1059 if next_index == len(self._packet_infos):
1062 return self._packet_infos[next_index]
1064 def get_next_packet_info_for_interface(self, src_index, info):
1066 Search the packet info list for the next packet info with same source
1069 :param src_index: source interface index to search for
1070 :param info: packet info - where to start the search
1071 :returns: packet info or None
1075 info = self.get_next_packet_info(info)
1078 if info.src == src_index:
1081 def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
1083 Search the packet info list for the next packet info with same source
1084 and destination interface indexes
1086 :param src_index: source interface index to search for
1087 :param dst_index: destination interface index to search for
1088 :param info: packet info - where to start the search
1089 :returns: packet info or None
1093 info = self.get_next_packet_info_for_interface(src_index, info)
1096 if info.dst == dst_index:
1099 def assert_equal(self, real_value, expected_value, name_or_class=None):
1100 if name_or_class is None:
1101 self.assertEqual(real_value, expected_value)
1104 msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
1105 msg = msg % (getdoc(name_or_class).strip(),
1106 real_value, str(name_or_class(real_value)),
1107 expected_value, str(name_or_class(expected_value)))
1109 msg = "Invalid %s: %s does not match expected value %s" % (
1110 name_or_class, real_value, expected_value)
1112 self.assertEqual(real_value, expected_value, msg)
1114 def assert_in_range(self,
1122 msg = "Invalid %s: %s out of range <%s,%s>" % (
1123 name, real_value, expected_min, expected_max)
1124 self.assertTrue(expected_min <= real_value <= expected_max, msg)
1126 def assert_packet_checksums_valid(self, packet,
1127 ignore_zero_udp_checksums=True):
1128 received = packet.__class__(scapy.compat.raw(packet))
1129 udp_layers = ['UDP', 'UDPerror']
1130 checksum_fields = ['cksum', 'chksum']
1133 temp = received.__class__(scapy.compat.raw(received))
1135 layer = temp.getlayer(counter)
1137 layer = layer.copy()
1138 layer.remove_payload()
1139 for cf in checksum_fields:
1140 if hasattr(layer, cf):
1141 if ignore_zero_udp_checksums and \
1142 0 == getattr(layer, cf) and \
1143 layer.name in udp_layers:
1145 delattr(temp.getlayer(counter), cf)
1146 checksums.append((counter, cf))
1149 counter = counter + 1
1150 if 0 == len(checksums):
1152 temp = temp.__class__(scapy.compat.raw(temp))
1153 for layer, cf in checksums:
1154 calc_sum = getattr(temp[layer], cf)
1156 getattr(received[layer], cf), calc_sum,
1157 "packet checksum on layer #%d: %s" % (layer, temp[layer].name))
1159 "Checksum field `%s` on `%s` layer has correct value `%s`" %
1160 (cf, temp[layer].name, calc_sum))
1162 def assert_checksum_valid(self, received_packet, layer,
1163 field_name='chksum',
1164 ignore_zero_checksum=False):
1165 """ Check checksum of received packet on given layer """
1166 received_packet_checksum = getattr(received_packet[layer], field_name)
1167 if ignore_zero_checksum and 0 == received_packet_checksum:
1169 recalculated = received_packet.__class__(
1170 scapy.compat.raw(received_packet))
1171 delattr(recalculated[layer], field_name)
1172 recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
1173 self.assert_equal(received_packet_checksum,
1174 getattr(recalculated[layer], field_name),
1175 "packet checksum on layer: %s" % layer)
def assert_ip_checksum_valid(self, received_packet,
                             ignore_zero_checksum=False):
    """ Assert that the packet's IP header checksum is valid. """
    layer_name = 'IP'
    self.assert_checksum_valid(received_packet, layer_name,
                               ignore_zero_checksum=ignore_zero_checksum)
def assert_tcp_checksum_valid(self, received_packet,
                              ignore_zero_checksum=False):
    """ Assert that the packet's TCP checksum is valid. """
    layer_name = 'TCP'
    self.assert_checksum_valid(received_packet, layer_name,
                               ignore_zero_checksum=ignore_zero_checksum)
def assert_udp_checksum_valid(self, received_packet,
                              ignore_zero_checksum=True):
    """ Assert that the packet's UDP checksum is valid.

    Zero checksums are ignored by default - a zero UDP checksum means
    "not computed" per the protocol.
    """
    layer_name = 'UDP'
    self.assert_checksum_valid(received_packet, layer_name,
                               ignore_zero_checksum=ignore_zero_checksum)
def assert_embedded_icmp_checksum_valid(self, received_packet):
    """ Validate checksums of error layers embedded in an ICMP reply. """
    # (layer class, layer name, ignore-zero-checksum) - zero checksums are
    # tolerated only for embedded UDP, matching plain-UDP semantics
    embedded_layers = (
        (IPerror, 'IPerror', False),
        (TCPerror, 'TCPerror', False),
        (UDPerror, 'UDPerror', True),
        (ICMPerror, 'ICMPerror', False),
    )
    for layer_cls, layer_name, ignore_zero in embedded_layers:
        if received_packet.haslayer(layer_cls):
            self.assert_checksum_valid(received_packet, layer_name,
                                       ignore_zero_checksum=ignore_zero)
1203 def assert_icmp_checksum_valid(self, received_packet):
1204 self.assert_checksum_valid(received_packet, 'ICMP')
1205 self.assert_embedded_icmp_checksum_valid(received_packet)
1207 def assert_icmpv6_checksum_valid(self, pkt):
1208 if pkt.haslayer(ICMPv6DestUnreach):
1209 self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
1210 self.assert_embedded_icmp_checksum_valid(pkt)
1211 if pkt.haslayer(ICMPv6EchoRequest):
1212 self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
1213 if pkt.haslayer(ICMPv6EchoReply):
1214 self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
1216 def get_packet_counter(self, counter):
1217 if counter.startswith("/"):
1218 counter_value = self.statistics.get_counter(counter)
1220 counters = self.vapi.cli("sh errors").split('\n')
1222 for i in range(1, len(counters) - 1):
1223 results = counters[i].split()
1224 if results[1] == counter:
1225 counter_value = int(results[0])
1227 return counter_value
1229 def assert_packet_counter_equal(self, counter, expected_value):
1230 counter_value = self.get_packet_counter(counter)
1231 self.assert_equal(counter_value, expected_value,
1232 "packet counter `%s'" % counter)
1234 def assert_error_counter_equal(self, counter, expected_value):
1235 counter_value = self.statistics[counter].sum()
1236 self.assert_equal(counter_value, expected_value,
1237 "error counter `%s'" % counter)
1240 def sleep(cls, timeout, remark=None):
1242 # /* Allow sleep(0) to maintain win32 semantics, and as decreed
1243 # * by Guido, only the main thread can be interrupted.
1245 # https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892 # noqa
1248 if hasattr(os, 'sched_yield'):
1254 cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
1255 before = time.time()
1258 if after - before > 2 * timeout:
1259 cls.logger.error("unexpected self.sleep() result - "
1260 "slept for %es instead of ~%es!",
1261 after - before, timeout)
1264 "Finished sleep (%s) - slept %es (wanted %es)",
1265 remark, after - before, timeout)
1267 def pg_send(self, intf, pkts, worker=None, trace=True):
1268 intf.add_stream(pkts, worker=worker)
1269 self.pg_enable_capture(self.pg_interfaces)
1270 self.pg_start(trace=trace)
1272 def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
1273 self.pg_send(intf, pkts)
1276 for i in self.pg_interfaces:
1277 i.get_capture(0, timeout=timeout)
1278 i.assert_nothing_captured(remark=remark)
1281 def send_and_expect(self, intf, pkts, output, n_rx=None, worker=None,
1285 self.pg_send(intf, pkts, worker=worker, trace=trace)
1286 rx = output.get_capture(n_rx)
1289 def send_and_expect_only(self, intf, pkts, output, timeout=None):
1290 self.pg_send(intf, pkts)
1291 rx = output.get_capture(len(pkts))
1295 for i in self.pg_interfaces:
1296 if i not in outputs:
1297 i.get_capture(0, timeout=timeout)
1298 i.assert_nothing_captured()
def get_testcase_doc_name(test):
    """ Return the first line of the docstring of the test's class.

    :param test: a test instance whose class carries a docstring
    :returns: first docstring line, used as the human-readable name
    """
    return getdoc(test.__class__).splitlines()[0]
def get_test_description(descriptions, test):
    """ Return the description used to report a test.

    :param descriptions: when truthy, prefer the test's short description
    :param test: test instance (must provide shortDescription())
    :returns: the short description when enabled and available,
        otherwise str(test)
    """
    short_description = test.shortDescription()
    if descriptions and short_description:
        return short_description
    else:
        return str(test)
class TestCaseInfo(object):
    """ Bundle of per-test-case runtime information. """

    def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
        self.logger = logger              # logger used by the test case
        self.tempdir = tempdir            # temp dir used by the test case
        self.vpp_pid = vpp_pid            # PID of the VPP process under test
        self.vpp_bin_path = vpp_bin_path  # path to the vpp binary
        # name of the test during which a core file appeared (None if none)
        self.core_crash_test = None
class VppTestResult(unittest.TestResult):
    """
    @property result_string
     String variable to store the test case result string.
    @property errors
     List variable containing 2-tuples of TestCase instances and strings
     holding formatted tracebacks. Each tuple represents a test which
     raised an unexpected exception.
    @property failures
     List variable containing 2-tuples of TestCase instances and strings
     holding formatted tracebacks. Each tuple represents a test where
     a failure was explicitly signalled using the TestCase.assert*()
     methods.
    """

    # shared across all instances: info about failed test cases and about
    # test cases which produced a core file
    failed_test_cases_info = set()
    core_crash_test_cases_info = set()
    current_test_case_info = None

    def __init__(self, stream=None, descriptions=None, verbosity=None,
                 runner=None):
        """
        :param stream File descriptor to store where to report test results.
            Set to the standard error stream by default.
        :param descriptions Boolean variable to store information if to use
            test case descriptions.
        :param verbosity Integer variable to store required verbosity level.
        """
        super(VppTestResult, self).__init__(stream, descriptions, verbosity)
        self.stream = stream
        self.descriptions = descriptions
        self.verbosity = verbosity
        self.result_string = None
        self.runner = runner
        self.printed = []  # test classes whose header was already printed

    def addSuccess(self, test):
        """
        Record a test succeeded result

        :param test:
        """
        if self.current_test_case_info:
            self.current_test_case_info.logger.debug(
                "--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__,
                                                       test._testMethodName,
                                                       test._testMethodDoc))
        unittest.TestResult.addSuccess(self, test)
        self.result_string = colorize("OK", GREEN)

        self.send_result_through_pipe(test, PASS)

    def addSkip(self, test, reason):
        """
        Record a test skipped.

        :param test:
        :param reason:
        """
        if self.current_test_case_info:
            self.current_test_case_info.logger.debug(
                "--- addSkip() %s.%s(%s) called, reason is %s" %
                (test.__class__.__name__, test._testMethodName,
                 test._testMethodDoc, reason))
        unittest.TestResult.addSkip(self, test, reason)
        self.result_string = colorize("SKIP", YELLOW)

        # cpu-shortage skips are reported separately so the parent process
        # can account for them
        if reason == "not enough cpus":
            self.send_result_through_pipe(test, SKIP_CPU_SHORTAGE)
        else:
            self.send_result_through_pipe(test, SKIP)

    def symlink_failed(self):
        """ Create a symlink to the failed test's temp dir in FAILED_DIR. """
        if self.current_test_case_info:
            try:
                failed_dir = os.getenv('FAILED_DIR')
                link_path = os.path.join(
                    failed_dir,
                    '%s-FAILED' %
                    os.path.basename(self.current_test_case_info.tempdir))

                self.current_test_case_info.logger.debug(
                    "creating a link to the failed test")
                self.current_test_case_info.logger.debug(
                    "os.symlink(%s, %s)" %
                    (self.current_test_case_info.tempdir, link_path))
                if os.path.exists(link_path):
                    self.current_test_case_info.logger.debug(
                        'symlink already exists')
                else:
                    os.symlink(self.current_test_case_info.tempdir, link_path)

            except Exception as e:
                # best-effort: never let symlink housekeeping break reporting
                self.current_test_case_info.logger.error(e)

    def send_result_through_pipe(self, test, result):
        # forward the result to the parent process when running forked
        if hasattr(self, 'test_framework_result_pipe'):
            pipe = self.test_framework_result_pipe
            if pipe:
                pipe.send((test.id(), result))

    def log_error(self, test, err, fn_name):
        """ Log an error/failure with its formatted traceback. """
        if self.current_test_case_info:
            if isinstance(test, unittest.suite._ErrorHolder):
                test_name = test.description
            else:
                test_name = '%s.%s(%s)' % (test.__class__.__name__,
                                           test._testMethodName,
                                           test._testMethodDoc)
            self.current_test_case_info.logger.debug(
                "--- %s() %s called, err is %s" %
                (fn_name, test_name, err))
            self.current_test_case_info.logger.debug(
                "formatted exception is:\n%s" %
                "".join(format_exception(*err)))

    def add_error(self, test, err, unittest_fn, error_type):
        """ Shared bookkeeping for addFailure()/addError(): log, record,
        symlink the temp dir and check for core files. """
        if error_type == FAIL:
            self.log_error(test, err, 'addFailure')
            error_type_str = colorize("FAIL", RED)
        elif error_type == ERROR:
            self.log_error(test, err, 'addError')
            error_type_str = colorize("ERROR", RED)
        else:
            raise Exception('Error type %s cannot be used to record an '
                            'error or a failure' % error_type)

        unittest_fn(self, test, err)
        if self.current_test_case_info:
            self.result_string = "%s [ temp dir used by test case: %s ]" % \
                                 (error_type_str,
                                  self.current_test_case_info.tempdir)
            self.symlink_failed()
            self.failed_test_cases_info.add(self.current_test_case_info)
            if is_core_present(self.current_test_case_info.tempdir):
                # remember only the first test that produced the core
                if not self.current_test_case_info.core_crash_test:
                    if isinstance(test, unittest.suite._ErrorHolder):
                        test_name = str(test)
                    else:
                        test_name = "'{!s}' ({!s})".format(
                            get_testcase_doc_name(test), test.id())
                    self.current_test_case_info.core_crash_test = test_name
                self.core_crash_test_cases_info.add(
                    self.current_test_case_info)
        else:
            self.result_string = '%s [no temp dir]' % error_type_str

        self.send_result_through_pipe(test, error_type)

    def addFailure(self, test, err):
        """
        Record a test failed result

        :param test:
        :param err: error message
        """
        self.add_error(test, err, unittest.TestResult.addFailure, FAIL)

    def addError(self, test, err):
        """
        Record a test error result

        :param test:
        :param err: error message
        """
        self.add_error(test, err, unittest.TestResult.addError, ERROR)

    def getDescription(self, test):
        """
        Get test description

        :param test:
        :returns: test description
        """
        return get_test_description(self.descriptions, test)

    def startTest(self, test):
        """ Start a test """

        def print_header(test):
            # print the test class header only once per class
            if test.__class__ in self.printed:
                return

            test_doc = getdoc(test)
            if not test_doc:
                raise Exception("No doc string for test '%s'" % test.id())

            test_title = test_doc.splitlines()[0].rstrip()
            test_title = colorize(test_title, GREEN)
            if test.is_tagged_run_solo():
                test_title = colorize(f"SOLO RUN: {test_title}", YELLOW)

            # This block may overwrite the colorized title above,
            # but we want this to stand out and be fixed
            if test.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
                test_title = colorize(
                    f"FIXME with VPP workers: {test_title}", RED)

            if hasattr(test, 'vpp_worker_count'):
                if test.vpp_worker_count == 0:
                    test_title += " [main thread only]"
                elif test.vpp_worker_count == 1:
                    test_title += " [1 worker thread]"
                else:
                    test_title += f" [{test.vpp_worker_count} worker threads]"

            if test.__class__.skipped_due_to_cpu_lack:
                test_title = colorize(
                    f"{test_title} [skipped - not enough cpus, "
                    f"required={test.__class__.get_cpus_required()}, "
                    f"available={max_vpp_cpus}]", YELLOW)

            print(double_line_delim)
            print(test_title)
            print(double_line_delim)
            self.printed.append(test.__class__)

        print_header(test)
        self.start_test = time.time()
        unittest.TestResult.startTest(self, test)
        if self.verbosity > 0:
            self.stream.writeln(
                "Starting " + self.getDescription(test) + " ...")
            self.stream.writeln(single_line_delim)

    def stopTest(self, test):
        """
        Called when the given test has been run

        :param test:
        """
        unittest.TestResult.stopTest(self, test)

        if self.verbosity > 0:
            self.stream.writeln(single_line_delim)
            self.stream.writeln("%-73s%s" % (self.getDescription(test),
                                             self.result_string))
            self.stream.writeln(single_line_delim)
        else:
            self.stream.writeln("%-68s %4.2f %s" %
                                (self.getDescription(test),
                                 time.time() - self.start_test,
                                 self.result_string))

        self.send_result_through_pipe(test, TEST_RUN)

    def printErrors(self):
        """
        Print errors from running the test case
        """
        if len(self.errors) > 0 or len(self.failures) > 0:
            self.stream.writeln()
            self.printErrorList('ERROR', self.errors)
            self.printErrorList('FAIL', self.failures)

        # ^^ that is the last output from unittest before summary
        if not self.runner.print_summary:
            devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
            self.stream = devnull
            self.runner.stream = devnull

    def printErrorList(self, flavour, errors):
        """
        Print error list to the output stream together with error type
        and test case description.

        :param flavour: error type
        :param errors: iterable errors
        """
        for test, err in errors:
            self.stream.writeln(double_line_delim)
            self.stream.writeln("%s: %s" %
                                (flavour, self.getDescription(test)))
            self.stream.writeln(single_line_delim)
            self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
    """
    A basic test runner implementation which prints results to standard error.
    """

    @property
    def resultclass(self):
        """Class maintaining the results of the tests"""
        return VppTestResult

    def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
                 result_pipe=None, failfast=False, buffer=False,
                 resultclass=None, print_summary=True, **kwargs):
        # ignore stream setting here, use hard-coded stdout to be in sync
        # with prints from VppTestCase methods ...
        super(VppTestRunner, self).__init__(sys.stdout, descriptions,
                                            verbosity, failfast, buffer,
                                            resultclass, **kwargs)
        KeepAliveReporter.pipe = keep_alive_pipe

        # remember the real stream so it can be restored after a
        # summary-less run redirects output to devnull
        self.orig_stream = self.stream
        self.resultclass.test_framework_result_pipe = result_pipe

        self.print_summary = print_summary

    def _makeResult(self):
        return self.resultclass(self.stream,
                                self.descriptions,
                                self.verbosity,
                                self)

    def run(self, test):
        """
        Run the tests

        :param test: the test (suite) to run
        :returns: the populated test result object
        """
        faulthandler.enable()  # emit stack trace to stderr if killed by signal

        result = super(VppTestRunner, self).run(test)
        if not self.print_summary:
            self.stream = self.orig_stream
            result.stream = self.orig_stream
        return result
class Worker(Thread):
    """ Run an external executable in a separate thread, capturing its
    output and return code (stored in self.result). """

    def __init__(self, executable_args, logger, env=None, *args, **kwargs):
        super(Worker, self).__init__(*args, **kwargs)
        self.logger = logger
        self.args = executable_args
        if hasattr(self, 'testcase') and self.testcase.debug_all:
            if self.testcase.debug_gdbserver:
                # bug fix: prepend gdbserver to the executable args
                # (self.args), not to the leftover *args tuple, which would
                # raise a TypeError on list + tuple concatenation
                self.args = ['/usr/bin/gdbserver', 'localhost:{port}'
                             .format(port=self.testcase.gdbserver_port)] + \
                    self.args
            elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'):
                self.args.append(self.wait_for_gdb)
        self.app_bin = executable_args[0]
        self.app_name = os.path.basename(self.app_bin)
        if hasattr(self, 'role'):
            self.app_name += ' {role}'.format(role=self.role)
        self.process = None  # subprocess.Popen handle, set in run()
        self.result = None   # exit code of the executable, set in run()
        env = {} if env is None else env
        self.env = copy.deepcopy(env)

    def wait_for_enter(self):
        """ When debugging, print gdb attach instructions and wait for the
        user to press ENTER before continuing. """
        if not hasattr(self, 'testcase'):
            return
        if self.testcase.debug_all and self.testcase.debug_gdbserver:
            print()
            print(double_line_delim)
            print("Spawned GDB Server for '{app}' with PID: {pid}"
                  .format(app=self.app_name, pid=self.process.pid))
        elif self.testcase.debug_all and self.testcase.debug_gdb:
            print()
            print(double_line_delim)
            print("Spawned '{app}' with PID: {pid}"
                  .format(app=self.app_name, pid=self.process.pid))
        else:
            return
        print(single_line_delim)
        print("You can debug '{app}' using:".format(app=self.app_name))
        if self.testcase.debug_gdbserver:
            print("sudo gdb " + self.app_bin +
                  " -ex 'target remote localhost:{port}'"
                  .format(port=self.testcase.gdbserver_port))
            print("Now is the time to attach gdb by running the above "
                  "command, set up breakpoints etc., then resume from "
                  "within gdb by issuing the 'continue' command")
            self.testcase.gdbserver_port += 1
        elif self.testcase.debug_gdb:
            print("sudo gdb " + self.app_bin +
                  " -ex 'attach {pid}'".format(pid=self.process.pid))
            print("Now is the time to attach gdb by running the above "
                  "command and set up breakpoints etc., then resume from"
                  " within gdb by issuing the 'continue' command")
        print(single_line_delim)
        input("Press ENTER to continue running the testcase...")

    def run(self):
        executable = self.args[0]
        if not os.path.exists(executable) or not os.access(
                executable, os.F_OK | os.X_OK):
            # Exit code that means some system file did not exist,
            # could not be opened, or had some other kind of error.
            self.result = os.EX_OSFILE
            raise EnvironmentError(
                "executable '%s' is not found or executable." % executable)
        self.logger.debug("Running executable: '{app}'"
                          .format(app=' '.join(self.args)))
        env = os.environ.copy()
        env.update(self.env)
        env["CK_LOG_FILE_NAME"] = "-"
        self.process = subprocess.Popen(
            self.args, shell=False, env=env, preexec_fn=os.setpgrp,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.wait_for_enter()
        out, err = self.process.communicate()
        self.logger.debug("Finished running `{app}'".format(app=self.app_name))
        self.logger.info("Return code is `%s'" % self.process.returncode)
        self.logger.info(single_line_delim)
        self.logger.info("Executable `{app}' wrote to stdout:"
                         .format(app=self.app_name))
        self.logger.info(single_line_delim)
        self.logger.info(out.decode('utf-8'))
        self.logger.info(single_line_delim)
        self.logger.info("Executable `{app}' wrote to stderr:"
                         .format(app=self.app_name))
        self.logger.info(single_line_delim)
        self.logger.info(err.decode('utf-8'))
        self.logger.info(single_line_delim)
        self.result = self.process.returncode
1750 if __name__ == '__main__':