3 from __future__ import print_function
19 from collections import deque
20 from threading import Thread, Event
21 from inspect import getdoc, isclass
22 from traceback import format_exception
23 from logging import FileHandler, DEBUG, Formatter
25 from abc import ABC, abstractmethod
28 from scapy.packet import Raw
29 import hook as hookmodule
30 from vpp_pg_interface import VppPGInterface
31 from vpp_sub_interface import VppSubInterface
32 from vpp_lo_interface import VppLoInterface
33 from vpp_bvi_interface import VppBviInterface
34 from vpp_papi_provider import VppPapiProvider
36 from vpp_papi.vpp_stats import VPPStats
37 from vpp_papi.vpp_transport_socket import VppTransportSocketIOError
38 from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
40 from vpp_object import VppObjectRegistry
41 from util import ppp, is_core_present
42 from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
43 from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
44 from scapy.layers.inet6 import ICMPv6EchoReply
46 from cpu_config import available_cpus, num_cpus, max_vpp_cpus
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# Set up an empty logger for the testcase that can be overridden as necessary
null_logger = logging.getLogger('VppTestCase')
null_logger.addHandler(logging.NullHandler())
class BoolEnvironmentVariable(object):
    # Wraps an environment variable so that truth-testing the instance
    # evaluates the variable's current value against `true_values`.
    # NOTE(review): some lines of this class are missing from this excerpt
    # (e.g. the `__bool__`/`__repr__` def lines); comments below describe
    # only what is visible.

    def __init__(self, env_var_name, default='n', true_values=None):
        # env_var_name: name of the environment variable to consult.
        # default: value used when the variable is unset.
        # true_values: strings considered truthy (defaulted when None).
        self.name = env_var_name
        self.default = default
        self.true_values = true_values if true_values is not None else \
        # Truthiness: read the variable (falling back to default),
        # lower-case it and test membership in true_values.
        return os.getenv(self.name, self.default).lower() in self.true_values

    # Python 2 uses __nonzero__ for truth testing; alias it to __bool__.
    if sys.version_info[0] == 2:
        __nonzero__ = __bool__

        # Debug-friendly representation showing the full configuration.
        return 'BoolEnvironmentVariable(%r, default=%r, true_values=%r)' % \
            (self.name, self.default, self.true_values)

# Module-wide debug switch driven by the TEST_DEBUG environment variable.
debug_framework = BoolEnvironmentVariable('TEST_DEBUG')

Test framework module.

The module provides a set of tools for constructing and running tests and
representing the results.
class VppDiedError(Exception):
    """ exception for reporting that the subprocess has died."""

    # Reverse map from signal number to signal name (e.g. 9 -> 'SIGKILL'),
    # excluding the SIG_* handler constants.
    signals_by_value = {v: k for k, v in signal.__dict__.items() if
                        k.startswith('SIG') and not k.startswith('SIG_')}

    def __init__(self, rv=None, testcase=None, method_name=None):
        # rv: subprocess return code (negative means killed by a signal).
        # testcase/method_name: optional context of where the death occurred.
        self.signal_name = None
        self.testcase = testcase
        self.method_name = method_name
            # A negative return code is -<signal number>; translate it.
            self.signal_name = VppDiedError.signals_by_value[-rv]
        except (KeyError, TypeError):
        # Build the context suffix only when a testcase/method is known.
        if testcase is None and method_name is None:
            in_msg = ' while running %s.%s' % (testcase, method_name)
            # Return-code variant of the message, with the signal name
            # appended in brackets when it could be resolved.
            msg = "VPP subprocess died unexpectedly%s with return code: %d%s."\
                % (in_msg, self.rv, ' [%s]' %
                   self.signal_name is not None else ''))
            # No return code available - emit the short form.
            msg = "VPP subprocess died unexpectedly%s." % in_msg
        super(VppDiedError, self).__init__(msg)
class _PacketInfo(object):
    """Private class to create packet info object.
    Help process information about the next packet.
    Set variables to default values.
    #: Store the index of the packet.
    #: Store the index of the source packet generator interface of the packet.
    #: Store the index of the destination packet generator interface
    #: Store expected ip version
    #: Store expected upper protocol
    #: Store the copy of the former packet.

    def __eq__(self, other):
        # Two packet infos are equal when index, source, destination
        # and payload data all match; other attributes are ignored.
        index = self.index == other.index
        src = self.src == other.src
        dst = self.dst == other.dst
        data = self.data == other.data
        return index and src and dst and data
def pump_output(testclass):
    """ pump output from vpp stdout/stderr to proper queues """
    # Runs on a dedicated thread until the testcase sets the stop flag.
    # NOTE(review): initialization of `stdout_fragment`, `stderr_fragment`
    # and `limit` is not visible in this excerpt.
    while not testclass.pump_thread_stop_flag.is_set():
        # Wait until vpp's stdout/stderr or the wakeup pipe is readable.
        readable = select.select([testclass.vpp.stdout.fileno(),
                                  testclass.vpp.stderr.fileno(),
                                  testclass.pump_thread_wakeup_pipe[0]],
        if testclass.vpp.stdout.fileno() in readable:
            read = os.read(testclass.vpp.stdout.fileno(), 102400)
            # Keep line endings so partial lines can be detected below.
            split = read.decode('ascii',
                                errors='backslashreplace').splitlines(True)
            # Prepend any partial line left over from the previous read.
            if len(stdout_fragment) > 0:
                split[0] = "%s%s" % (stdout_fragment, split[0])
            if len(split) > 0 and split[-1].endswith("\n"):
                # Last chunk is an incomplete line; carry it to next read.
                stdout_fragment = split[-1]
            testclass.vpp_stdout_deque.extend(split[:limit])
            if not testclass.cache_vpp_output:
                # Immediate logging requested - emit each complete line now.
                for line in split[:limit]:
                    testclass.logger.info(
                        "VPP STDOUT: %s" % line.rstrip("\n"))
        if testclass.vpp.stderr.fileno() in readable:
            read = os.read(testclass.vpp.stderr.fileno(), 102400)
            split = read.decode('ascii',
                                errors='backslashreplace').splitlines(True)
            # Same partial-line handling as for stdout above.
            if len(stderr_fragment) > 0:
                split[0] = "%s%s" % (stderr_fragment, split[0])
            if len(split) > 0 and split[-1].endswith("\n"):
                stderr_fragment = split[-1]

            testclass.vpp_stderr_deque.extend(split[:limit])
            if not testclass.cache_vpp_output:
                for line in split[:limit]:
                    testclass.logger.error(
                        "VPP STDERR: %s" % line.rstrip("\n"))
        # ignoring the dummy pipe here intentionally - the
        # flag will take care of properly terminating the loop
def _is_skip_aarch64_set():
    """Build the truthy wrapper around the SKIP_AARCH64 env variable."""
    skip_flag = BoolEnvironmentVariable('SKIP_AARCH64')
    return skip_flag


# Evaluated once at import time; truthiness re-reads the environment.
is_skip_aarch64_set = _is_skip_aarch64_set()
209 def _is_platform_aarch64():
210 return platform.machine() == 'aarch64'
213 is_platform_aarch64 = _is_platform_aarch64()
def _running_extended_tests():
    """Build the truthy wrapper around the EXTENDED_TESTS env variable."""
    extended_flag = BoolEnvironmentVariable("EXTENDED_TESTS")
    return extended_flag


# Evaluated once at import time; truthiness re-reads the environment.
running_extended_tests = _running_extended_tests()
def _running_gcov_tests():
    """Build the truthy wrapper around the GCOV_TESTS env variable."""
    gcov_flag = BoolEnvironmentVariable("GCOV_TESTS")
    return gcov_flag


# Evaluated once at import time; truthiness re-reads the environment.
running_gcov_tests = _running_gcov_tests()
def get_environ_vpp_worker_count():
    # Parse VPP_WORKER_CONFIG, expected in the form "workers N".
    # NOTE(review): the unset-variable branch and the final return are not
    # visible in this excerpt.
    worker_config = os.getenv("VPP_WORKER_CONFIG", None)
        elems = worker_config.split(" ")
        # Reject anything that is not exactly "workers <count>".
        if elems[0] != "workers" or len(elems) != 2:
            raise ValueError("Wrong VPP_WORKER_CONFIG == '%s' value." %

# Worker count resolved once at import time.
environ_vpp_worker_count = get_environ_vpp_worker_count()


class KeepAliveReporter(object):
    """
    Singleton object which reports test start to parent process
    """
        # Borg pattern: all instances share one state dict.
        self.__dict__ = self._shared_state

    def pipe(self, pipe):
        # Setter side of the pipe property; it may only be assigned once.
        if self._pipe is not None:
            raise Exception("Internal error - pipe should only be set once.")

    def send_keep_alive(self, test, desc=None):
        """
        Write current test tmpdir & desc to keep-alive pipe to signal liveness
        """
        if self.pipe is None:
            # if not running forked..
            desc = '%s (%s)' % (desc, unittest.util.strclass(test))
        self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))


class TestCaseTag(Enum):
    # marks the suites that must run at the end
    # using only a single test runner
    # marks the suites broken on VPP multi-worker
    FIXME_VPP_WORKERS = 2


def create_tag_decorator(e):
    # Returns a class decorator that appends tag `e` to cls.test_tags,
    # creating the list when the attribute does not exist yet.
            cls.test_tags.append(e)
        except AttributeError:

# Ready-made decorators for the two known tags.
tag_run_solo = create_tag_decorator(TestCaseTag.RUN_SOLO)
tag_fixme_vpp_workers = create_tag_decorator(TestCaseTag.FIXME_VPP_WORKERS)


class CPUInterface(ABC):
    # Abstract interface for testcases that need CPU allocation.
    # Set to True when the case was skipped for lack of available CPUs.
    skipped_due_to_cpu_lack = False

    def get_cpus_required(cls):

    def assign_cpus(cls, cpus):
class VppTestCase(CPUInterface, unittest.TestCase):
    """This subclass is a base class for VPP test cases that are implemented as
    classes. It provides methods to create and run test case.

    # Extra "punt"/plugin stanzas appended to the VPP command line; subclasses
    # may override these class attributes.
    extra_vpp_punt_config = []
    extra_vpp_plugin_config = []
    # Default VAPI response timeout in seconds (0 disables it when debugging).
    vapi_response_timeout = 5

    def packet_infos(self):
        """List of packet infos"""
        return self._packet_infos

    def get_packet_count_for_if_idx(cls, dst_if_index):
        """Get the number of packet info for specified destination if index"""
        if dst_if_index in cls._packet_count_for_dst_if_idx:
            return cls._packet_count_for_dst_if_idx[dst_if_index]

    def has_tag(cls, tag):
        """ if the test case has a given tag - return true """
            return tag in cls.test_tags
        except AttributeError:

    def is_tagged_run_solo(cls):
        """ if the test case class is timing-sensitive - return true """
        return cls.has_tag(TestCaseTag.RUN_SOLO)

        """Return the instance of this testcase"""
        return cls.test_instance

    def set_debug_flags(cls, d):
        # Initialize all debug-mode class flags from the DEBUG env string `d`.
        cls.gdbserver_port = 7777
        cls.debug_core = False
        cls.debug_gdb = False
        cls.debug_gdbserver = False
        cls.debug_all = False
        cls.debug_attach = False
            cls.debug_core = True
        elif dl == "gdb" or dl == "gdb-all":
        elif dl == "gdbserver" or dl == "gdbserver-all":
            cls.debug_gdbserver = True
            cls.debug_attach = True
            # Unknown DEBUG value is a hard error rather than silent ignore.
            raise Exception("Unrecognized DEBUG option: '%s'" % d)
        if dl == "gdb-all" or dl == "gdbserver-all":

    def get_vpp_worker_count(cls):
        # Lazily compute and cache the worker count on the class.
        if not hasattr(cls, "vpp_worker_count"):
            # Suites broken on multi-worker are forced to zero workers.
            if cls.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
                cls.vpp_worker_count = 0
            cls.vpp_worker_count = environ_vpp_worker_count
        return cls.vpp_worker_count
404 def get_cpus_required(cls):
405 return 1 + cls.get_vpp_worker_count()
    def setUpConstants(cls):
        """ Set-up the test case class based on environment variables """
        cls.step = BoolEnvironmentVariable('STEP')
        # inverted case to handle '' == True
        c = os.getenv("CACHE_OUTPUT", "1")
        cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
        # Binary and plugin locations, all overridable via environment.
        cls.vpp_bin = os.getenv('VPP_BIN', "vpp")
        cls.plugin_path = os.getenv('VPP_PLUGIN_PATH')
        cls.test_plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
        cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
        # Combine built-in and external plugin paths (colon separated).
        if cls.plugin_path is not None:
            if cls.extern_plugin_path is not None:
                plugin_path = "%s:%s" % (
                    cls.plugin_path, cls.extern_plugin_path)
                plugin_path = cls.plugin_path
        elif cls.extern_plugin_path is not None:
            plugin_path = cls.extern_plugin_path
        # Interactive debugging needs a CLI listener to attach to.
        if cls.step or cls.debug_gdb or cls.debug_gdbserver:
            debug_cli = "cli-listen localhost:5002"
        size = os.getenv("COREDUMP_SIZE")
            coredump_size = "coredump-size %s" % size
        if coredump_size is None:
            # No size configured - do not limit coredumps.
            coredump_size = "coredump-size unlimited"
        # Optional node variant, e.g. to prefer a specific code variant.
        default_variant = os.getenv("VARIANT")
        if default_variant is not None:
            default_variant = "defaults { %s 100 }" % default_variant
        api_fuzzing = os.getenv("API_FUZZ")
        if api_fuzzing is None:
            # Assemble the vpp command line piecewise (list of tokens).
            "unix", "{", "nodaemon", debug_cli, "full-coredump",
            coredump_size, "runtime-dir", cls.tempdir, "}",
            "api-trace", "{", "on", "}",
            "api-segment", "{", "prefix", cls.get_api_segment_prefix(), "}",
            "cpu", "{", "main-core", str(cls.cpus[0]), ]
        # Workers (if any) get the remaining assigned cores.
        if cls.get_vpp_worker_count():
            cls.vpp_cmdline.extend([
                "corelist-workers", ",".join([str(x) for x in cls.cpus[1:]])])
        cls.vpp_cmdline.extend([
            "physmem", "{", "max-size", "32m", "}",
            "statseg", "{", "socket-name", cls.get_stats_sock_path(), "}",
            "socksvr", "{", "socket-name", cls.get_api_sock_path(), "}",
            "node { ", default_variant, "}",
            "api-fuzz {", api_fuzzing, "}",
            "plugins", "{", "plugin", "dpdk_plugin.so", "{", "disable", "}",
            "plugin", "rdma_plugin.so", "{", "disable", "}",
            "plugin", "lisp_unittest_plugin.so", "{", "enable", "}",
            "plugin", "unittest_plugin.so", "{", "enable", "}"
        ] + cls.extra_vpp_plugin_config + ["}", ])

        # Append any subclass-provided punt config and plugin path overrides.
        if cls.extra_vpp_punt_config is not None:
            cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
        if plugin_path is not None:
            cls.vpp_cmdline.extend(["plugin_path", plugin_path])
        if cls.test_plugin_path is not None:
            cls.vpp_cmdline.extend(["test_plugin_path", cls.test_plugin_path])

        if not cls.debug_attach:
            cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
            cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
    def wait_for_enter(cls):
        # Pause the test run so the user can attach a debugger to vpp.
        if cls.debug_gdbserver:
            print(double_line_delim)
            print("Spawned GDB server with PID: %d" % cls.vpp.pid)
            print(double_line_delim)
            print("Spawned VPP with PID: %d" % cls.vpp.pid)
            # Not a debug run - just record the PID and return.
            cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
        print(single_line_delim)
        print("You can debug VPP using:")
        if cls.debug_gdbserver:
            print("sudo gdb " + cls.vpp_bin +
                  " -ex 'target remote localhost:{port}'"
                  .format(port=cls.gdbserver_port))
            print("Now is the time to attach gdb by running the above "
                  "command, set up breakpoints etc., then resume VPP from "
                  "within gdb by issuing the 'continue' command")
            # Bump the port so a subsequent testcase gets a fresh one.
            cls.gdbserver_port += 1
            print("sudo gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
            print("Now is the time to attach gdb by running the above "
                  "command and set up breakpoints etc., then resume VPP from"
                  " within gdb by issuing the 'continue' command")
        print(single_line_delim)
        input("Press ENTER to continue running the testcase...")

        # NOTE(review): the def line of the vpp-spawning method is not
        # visible in this excerpt; the statements below launch vpp (possibly
        # under gdbserver) as a subprocess.
        cls.logger.debug(f"Assigned cpus: {cls.cpus}")
        cmdline = cls.vpp_cmdline

        if cls.debug_gdbserver:
            gdbserver = '/usr/bin/gdbserver'
            # Refuse to continue with a missing/non-executable gdbserver.
            if not os.path.isfile(gdbserver) or\
                    not os.access(gdbserver, os.X_OK):
                raise Exception("gdbserver binary '%s' does not exist or is "
                                "not executable" % gdbserver)

            cmdline = [gdbserver, 'localhost:{port}'
                       .format(port=cls.gdbserver_port)] + cls.vpp_cmdline
            cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))

            # Capture both output streams so pump_output can forward them.
            cls.vpp = subprocess.Popen(cmdline,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as e:
            cls.logger.critical("Subprocess returned with non-0 return code: ("
            cls.logger.critical("Subprocess returned with OS error: "
                                "(%s) %s", e.errno, e.strerror)
        except Exception as e:
            cls.logger.exception("Subprocess returned unexpected from "

    def wait_for_coredump(cls):
        # If a core file exists, wait (up to a deadline) for its size to
        # stop growing before tearing the process down.
        corefile = cls.tempdir + "/core"
        if os.path.isfile(corefile):
            cls.logger.error("Waiting for coredump to complete: %s", corefile)
            curr_size = os.path.getsize(corefile)
            deadline = time.time() + 60
            while time.time() < deadline:
                curr_size = os.path.getsize(corefile)
                # Size unchanged between polls - dump is considered complete.
                if size == curr_size:
                cls.logger.error("Timed out waiting for coredump to complete:"
                cls.logger.error("Coredump complete: %s, size %d",
572 def get_stats_sock_path(cls):
573 return "%s/stats.sock" % cls.tempdir
576 def get_api_sock_path(cls):
577 return "%s/api.sock" % cls.tempdir
580 def get_api_segment_prefix(cls):
581 return os.path.basename(cls.tempdir) # Only used for VAPI
    def get_tempdir(cls):
        # When attaching to an already-running vpp, reuse a fixed directory
        # (overridable via VPP_IN_GDB_TMP_DIR); otherwise make a fresh one.
            return os.getenv("VPP_IN_GDB_TMP_DIR",
                             "/tmp/unittest-attach-gdb")
        return tempfile.mkdtemp(prefix='vpp-unittest-%s-' % cls.__name__)
        """
        Perform class setup before running the testcase
        Remove shared memory files, start vpp and connect the vpp-api
        """
        super(VppTestCase, cls).setUpClass()
        cls.logger = get_logger(cls.__name__)
        # RND_SEED must be set by the test runner; logged below for repro.
        seed = os.environ["RND_SEED"]
        if hasattr(cls, 'parallel_handler'):
            cls.logger.addHandler(cls.parallel_handler)
            cls.logger.propagate = False
        d = os.getenv("DEBUG", None)
        cls.set_debug_flags(d)
        cls.tempdir = cls.get_tempdir()
        # Persist the testcase log into the tempdir.
        cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
        cls.file_handler.setFormatter(
            Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
        cls.file_handler.setLevel(DEBUG)
        cls.logger.addHandler(cls.file_handler)
        cls.logger.debug("--- setUpClass() for %s called ---" % cls.__name__)
        # Run from within the tempdir so relative artifacts land there.
        os.chdir(cls.tempdir)
        cls.logger.info("Temporary dir is %s, api socket is %s",
                        cls.tempdir, cls.get_api_sock_path())
        cls.logger.debug("Random seed is %s", seed)
        cls.reset_packet_infos()
        cls.registry = VppObjectRegistry()
        cls.vpp_startup_failed = False
        cls.reporter = KeepAliveReporter()
        # need to catch exceptions here because if we raise, then the cleanup
        # doesn't get called and we might end with a zombie vpp
            cls.reporter.send_keep_alive(cls, 'setUpClass')
            VppTestResult.current_test_case_info = TestCaseInfo(
                cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
            cls.vpp_stdout_deque = deque()
            cls.vpp_stderr_deque = deque()
            # The output pump thread is only needed when we spawned vpp
            # ourselves (not when attaching to an existing process).
            if not cls.debug_attach:
                cls.pump_thread_stop_flag = Event()
                cls.pump_thread_wakeup_pipe = os.pipe()
                cls.pump_thread = Thread(target=pump_output, args=(cls,))
                cls.pump_thread.daemon = True
                cls.pump_thread.start()
            # Debug runs disable the VAPI timeout so breakpoints don't
            # trip response deadlines.
            if cls.debug_gdb or cls.debug_gdbserver or cls.debug_attach:
                cls.vapi_response_timeout = 0
            cls.vapi = VppPapiProvider(cls.__name__, cls,
                                       cls.vapi_response_timeout)
                hook = hookmodule.StepHook(cls)
                hook = hookmodule.PollHook(cls)
            cls.vapi.register_hook(hook)
            cls.statistics = VPPStats(socketname=cls.get_stats_sock_path())
                cls.vpp_startup_failed = True
                    "VPP died shortly after startup, check the"
                    " output to standard error for possible cause")
            except (vpp_papi.VPPIOError, Exception) as e:
                cls.logger.debug("Exception connecting to vapi: %s" % e)
                cls.vapi.disconnect()
            if cls.debug_gdbserver:
                print(colorize("You're running VPP inside gdbserver but "
                               "VPP-API connection failed, did you forget "
                               "to 'continue' VPP from within gdb?", RED))
            # Parse the worker count out of "show thread" output.
            last_line = cls.vapi.cli("show thread").split("\n")[-2]
            cls.vpp_worker_count = int(last_line.split(" ")[0])
            print("Detected VPP with %s workers." % cls.vpp_worker_count)
        except vpp_papi.VPPRuntimeError as e:
            cls.logger.debug("%s" % e)
        except Exception as e:
            cls.logger.debug("Exception connecting to VPP: %s" % e)
    def _debug_quit(cls):
        # In interactive debug modes, give the user a chance to finish
        # debugging before the process is killed.
        if (cls.debug_gdbserver or cls.debug_gdb):
                if cls.vpp.returncode is None:
                    print(double_line_delim)
                    print("VPP or GDB server is still running")
                    print(single_line_delim)
                    input("When done debugging, press ENTER to kill the "
                          "process and finish running the testcase...")
            except AttributeError:
        """
        Disconnect vpp-api, kill vpp and cleanup shared memory files
        """
        # first signal that we want to stop the pump thread, then wake it up
        if hasattr(cls, 'pump_thread_stop_flag'):
            cls.pump_thread_stop_flag.set()
        if hasattr(cls, 'pump_thread_wakeup_pipe'):
            # Any write unblocks the select() in pump_output.
            os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up')
        if hasattr(cls, 'pump_thread'):
            cls.logger.debug("Waiting for pump thread to stop")
            cls.pump_thread.join()
        if hasattr(cls, 'vpp_stderr_reader_thread'):
            cls.logger.debug("Waiting for stderr pump to stop")
            cls.vpp_stderr_reader_thread.join()

        if hasattr(cls, 'vpp'):
            if hasattr(cls, 'vapi'):
                cls.logger.debug(cls.vapi.vpp.get_stats())
                cls.logger.debug("Disconnecting class vapi client on %s",
                cls.vapi.disconnect()
                cls.logger.debug("Deleting class vapi attribute on %s",
            # Only terminate vpp if we own the process and it still runs.
            if not cls.debug_attach and cls.vpp.returncode is None:
                # Let a possible in-progress coredump finish first.
                cls.wait_for_coredump()
                cls.logger.debug("Sending TERM to vpp")
                cls.logger.debug("Waiting for vpp to die")
                    outs, errs = cls.vpp.communicate(timeout=5)
                except subprocess.TimeoutExpired:
                    outs, errs = cls.vpp.communicate()
            cls.logger.debug("Deleting class vpp attribute on %s",
            if not cls.debug_attach:
                cls.vpp.stdout.close()
                cls.vpp.stderr.close()

        # Escalate stderr logging when vpp failed during startup.
        if cls.vpp_startup_failed:
            stdout_log = cls.logger.info
            stderr_log = cls.logger.critical
            stdout_log = cls.logger.info
            stderr_log = cls.logger.info

        # Dump everything vpp printed, both to the log and to files.
        if hasattr(cls, 'vpp_stdout_deque'):
            stdout_log(single_line_delim)
            stdout_log('VPP output to stdout while running %s:', cls.__name__)
            stdout_log(single_line_delim)
            vpp_output = "".join(cls.vpp_stdout_deque)
            with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
            stdout_log('\n%s', vpp_output)
            stdout_log(single_line_delim)

        if hasattr(cls, 'vpp_stderr_deque'):
            stderr_log(single_line_delim)
            stderr_log('VPP output to stderr while running %s:', cls.__name__)
            stderr_log(single_line_delim)
            vpp_output = "".join(cls.vpp_stderr_deque)
            with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
            stderr_log('\n%s', vpp_output)
            stderr_log(single_line_delim)
    def tearDownClass(cls):
        """ Perform final cleanup after running all tests in this test-case """
        cls.logger.debug("--- tearDownClass() for %s called ---" %
        cls.reporter.send_keep_alive(cls, 'tearDownClass')
        # Release the per-testcase log file and shared packet-info state.
        cls.file_handler.close()
        cls.reset_packet_infos()
        debug_internal.on_tear_down_class(cls)
788 def show_commands_at_teardown(self):
789 """ Allow subclass specific teardown logging additions."""
790 self.logger.info("--- No test specific show commands provided. ---")
        """ Show various debug prints after each test """
        self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
                          (self.__class__.__name__, self._testMethodName,
                           self._testMethodDoc))

        # Only query vpp when it is still alive; all CLI below would hang
        # or fail against a dead process.
            if not self.vpp_dead:
                self.logger.debug(self.vapi.cli("show trace max 1000"))
                self.logger.info(self.vapi.ppcli("show interface"))
                self.logger.info(self.vapi.ppcli("show hardware"))
                self.logger.info(self.statistics.set_errors_str())
                self.logger.info(self.vapi.ppcli("show run"))
                self.logger.info(self.vapi.ppcli("show log"))
                self.logger.info(self.vapi.ppcli("show bihash"))
                self.logger.info("Logging testcase specific show commands.")
                self.show_commands_at_teardown()
                # Remove any per-test vpp config objects from the registry.
                self.registry.remove_vpp_config(self.logger)
            # Save/Dump VPP api trace log
            m = self._testMethodName
            api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
            tmp_api_trace = "/tmp/%s" % api_trace
            vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
            self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
            self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
            # vpp writes the trace under /tmp; move it into the tempdir.
            os.rename(tmp_api_trace, vpp_api_trace_log)
            self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
        except VppTransportSocketIOError:
            self.logger.debug("VppTransportSocketIOError: Vpp dead. "
                              "Cannot log show commands.")
            self.registry.unregister_all(self.logger)

        """ Clear trace before running each test"""
        super(VppTestCase, self).setUp()
        self.reporter.send_keep_alive(self)
            # vpp already died before this test even started.
            raise VppDiedError(rv=None, testcase=self.__class__.__name__,
                               method_name=self._testMethodName)
        self.sleep(.1, "during setUp")
        # Mark the test boundary in the captured vpp output streams.
        self.vpp_stdout_deque.append(
            "--- test setUp() for %s.%s(%s) starts here ---\n" %
            (self.__class__.__name__, self._testMethodName,
             self._testMethodDoc))
        self.vpp_stderr_deque.append(
            "--- test setUp() for %s.%s(%s) starts here ---\n" %
            (self.__class__.__name__, self._testMethodName,
             self._testMethodDoc))
        self.vapi.cli("clear trace")
        # store the test instance inside the test class - so that objects
        # holding the class can access instance methods (like assertEqual)
        type(self).test_instance = self
    def pg_enable_capture(cls, interfaces=None):
        """
        Enable capture on packet-generator interfaces

        :param interfaces: iterable interface indexes (if None,
                           use self.pg_interfaces)
        """
        # Default to all of the testcase's pg interfaces.
        if interfaces is None:
            interfaces = cls.pg_interfaces
864 def register_pcap(cls, intf, worker):
865 """ Register a pcap in the testclass """
866 # add to the list of captures with current timestamp
867 cls._pcaps.append((intf, worker))
    def get_vpp_time(cls):
        # processes e.g. "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
        # returns float("2.190522")
        timestr = cls.vapi.cli('show clock')
        head, sep, tail = timestr.partition(',')
        head, sep, tail = head.partition('Time now')

    def sleep_on_vpp_time(cls, sec):
        """ Sleep according to time in VPP world """
        # On a busy system with many processes
        # we might end up with VPP time being slower than real world
        # So take that into account when waiting for VPP to do something
        start_time = cls.get_vpp_time()
        while cls.get_vpp_time() - start_time < sec:

    def pg_start(cls, trace=True):
        """ Enable the PG, wait till it is done, then clean up """
        # Rotate out pcap files from the previous run first.
        for (intf, worker) in cls._old_pcaps:
            intf.rename_old_pcap_file(intf.get_in_path(worker),
                                      intf.in_history_counter)
            cls.vapi.cli("clear trace")
            cls.vapi.cli("trace add pg-input 1000")
        cls.vapi.cli('packet-generator enable')
        # PG, when starts, runs to completion -
        # so let's avoid a race condition,
        # and wait a little till it's done.
        # Then clean it up - and then be gone.
        deadline = time.time() + 300
        while cls.vapi.cli('show packet-generator').find("Yes") != -1:
            cls.sleep(0.01)  # yield
            if time.time() > deadline:
                cls.logger.error("Timeout waiting for pg to stop")
        # Delete the registered generator streams now that they finished.
        for intf, worker in cls._pcaps:
            cls.vapi.cli('packet-generator delete %s' %
                         intf.get_cap_name(worker))
        cls._old_pcaps = cls._pcaps
    def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
        """
        Create packet-generator interfaces.

        :param interfaces: iterable indexes of the interfaces.
        :returns: List of created interfaces.
        """
            intf = VppPGInterface(cls, i, gso, gso_size)
            # Expose each interface as a class attribute (e.g. cls.pg0).
            setattr(cls, intf.name, intf)
        cls.pg_interfaces = result

    def create_loopback_interfaces(cls, count):
        """
        Create loopback interfaces.

        :param count: number of interfaces created.
        :returns: List of created interfaces.
        """
        result = [VppLoInterface(cls) for i in range(count)]
            # Expose each interface as a class attribute (e.g. cls.loop0).
            setattr(cls, intf.name, intf)
        cls.lo_interfaces = result

    def create_bvi_interfaces(cls, count):
        """
        Create BVI interfaces.

        :param count: number of interfaces created.
        :returns: List of created interfaces.
        """
        result = [VppBviInterface(cls) for i in range(count)]
            # Expose each interface as a class attribute (e.g. cls.bvi0).
            setattr(cls, intf.name, intf)
        cls.bvi_interfaces = result

    def extend_packet(packet, size, padding=' '):
        """
        Extend packet to given size by padding with spaces or custom padding
        NOTE: Currently works only when Raw layer is present.

        :param packet: packet
        :param size: target size
        :param padding: padding used to extend the payload
        """
        # +4 accounts for the Ethernet FCS appended on the wire.
        packet_len = len(packet) + 4
        extend = size - packet_len
            # Repeat the padding enough times, then trim to exact length.
            num = (extend // len(padding)) + 1
            packet[Raw].load += (padding * num)[:extend].encode("ascii")
978 def reset_packet_infos(cls):
979 """ Reset the list of packet info objects and packet counts to zero """
980 cls._packet_infos = {}
981 cls._packet_count_for_dst_if_idx = {}
    def create_packet_info(cls, src_if, dst_if):
        """
        Create packet info object containing the source and destination indexes
        and add it to the testcase's packet info list

        :param VppInterface src_if: source interface
        :param VppInterface dst_if: destination interface

        :returns: _PacketInfo object
        """
        # Index is simply the next slot in the packet-info table.
        info.index = len(cls._packet_infos)
        info.src = src_if.sw_if_index
        info.dst = dst_if.sw_if_index
        # Sub-interfaces count against their parent's sw_if_index.
        if isinstance(dst_if, VppSubInterface):
            dst_idx = dst_if.parent.sw_if_index
            dst_idx = dst_if.sw_if_index
        if dst_idx in cls._packet_count_for_dst_if_idx:
            cls._packet_count_for_dst_if_idx[dst_idx] += 1
            cls._packet_count_for_dst_if_idx[dst_idx] = 1
        cls._packet_infos[info.index] = info
1011 def info_to_payload(info):
1013 Convert _PacketInfo object to packet payload
1015 :param info: _PacketInfo object
1017 :returns: string containing serialized data from packet info
1019 return "%d %d %d %d %d" % (info.index, info.src, info.dst,
1020 info.ip, info.proto)
    def payload_to_info(payload, payload_field='load'):
        """
        Convert packet payload to _PacketInfo object

        :param payload: packet payload
        :type payload:  <class 'scapy.packet.Raw'>
        :param payload_field: packet fieldname of payload "load" for
                <class 'scapy.packet.Raw'>
        :type payload_field: str
        :returns: _PacketInfo object containing de-serialized data from payload
        """
        # Inverse of info_to_payload(): five whitespace-separated ints.
        numbers = getattr(payload, payload_field).split()
        info = _PacketInfo()
        info.index = int(numbers[0])
        info.src = int(numbers[1])
        info.dst = int(numbers[2])
        info.ip = int(numbers[3])
        info.proto = int(numbers[4])

    def get_next_packet_info(self, info):
        """
        Iterate over the packet info list stored in the testcase
        Start iteration with first element if info is None
        Continue based on index in info if info is specified

        :param info: info or None
        :returns: next info in list or None if no more infos
        """
            next_index = info.index + 1
        # Reached the end of the table - no more infos.
        if next_index == len(self._packet_infos):
        return self._packet_infos[next_index]

    def get_next_packet_info_for_interface(self, src_index, info):
        """
        Search the packet info list for the next packet info with same source

        :param src_index: source interface index to search for
        :param info: packet info - where to start the search
        :returns: packet info or None
        """
            info = self.get_next_packet_info(info)
            if info.src == src_index:

    def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
        """
        Search the packet info list for the next packet info with same source
        and destination interface indexes

        :param src_index: source interface index to search for
        :param dst_index: destination interface index to search for
        :param info: packet info - where to start the search
        :returns: packet info or None
        """
            info = self.get_next_packet_info_for_interface(src_index, info)
            if info.dst == dst_index:

    def assert_equal(self, real_value, expected_value, name_or_class=None):
        # Without context, fall back to the plain unittest assertion.
        if name_or_class is None:
            self.assertEqual(real_value, expected_value)
        # When given a class (e.g. an enum), render both values through it
        # for a more descriptive failure message.
        msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
        msg = msg % (getdoc(name_or_class).strip(),
                     real_value, str(name_or_class(real_value)),
                     expected_value, str(name_or_class(expected_value)))
            # Plain-name variant of the failure message.
            msg = "Invalid %s: %s does not match expected value %s" % (
                name_or_class, real_value, expected_value)

        self.assertEqual(real_value, expected_value, msg)

    def assert_in_range(self,
        # Assert expected_min <= real_value <= expected_max with a
        # descriptive message naming the checked quantity.
        msg = "Invalid %s: %s out of range <%s,%s>" % (
            name, real_value, expected_min, expected_max)
        self.assertTrue(expected_min <= real_value <= expected_max, msg)
    def assert_packet_checksums_valid(self, packet,
                                      ignore_zero_udp_checksums=True):
        # Re-parse the packet from raw bytes so scapy recomputes nothing yet.
        received = packet.__class__(scapy.compat.raw(packet))
        udp_layers = ['UDP', 'UDPerror']
        checksum_fields = ['cksum', 'chksum']
        temp = received.__class__(scapy.compat.raw(received))
            # Walk the layer stack by index.
            layer = temp.getlayer(counter)
                # Work on a payload-less copy so field inspection is local.
                layer = layer.copy()
                layer.remove_payload()
                for cf in checksum_fields:
                    if hasattr(layer, cf):
                        # Optionally skip zero UDP checksums (0 = "no csum").
                        if ignore_zero_udp_checksums and \
                                0 == getattr(layer, cf) and \
                                layer.name in udp_layers:
                        # Blank the field so re-serialization recomputes it.
                        delattr(temp.getlayer(counter), cf)
                        checksums.append((counter, cf))
            counter = counter + 1
        # Nothing to verify - no checksum fields found.
        if 0 == len(checksums):
        # Rebuild from raw bytes: scapy fills in the deleted checksums.
        temp = temp.__class__(scapy.compat.raw(temp))
        for layer, cf in checksums:
            calc_sum = getattr(temp[layer], cf)
                getattr(received[layer], cf), calc_sum,
                "packet checksum on layer #%d: %s" % (layer, temp[layer].name))
                "Checksum field `%s` on `%s` layer has correct value `%s`" %
                (cf, temp[layer].name, calc_sum))

    def assert_checksum_valid(self, received_packet, layer,
                              field_name='chksum',
                              ignore_zero_checksum=False):
        """ Check checksum of received packet on given layer """
        received_packet_checksum = getattr(received_packet[layer], field_name)
        if ignore_zero_checksum and 0 == received_packet_checksum:
        # Strip the checksum and let scapy recompute it on re-parse.
        recalculated = received_packet.__class__(
            scapy.compat.raw(received_packet))
        delattr(recalculated[layer], field_name)
        recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
        self.assert_equal(received_packet_checksum,
                          getattr(recalculated[layer], field_name),
                          "packet checksum on layer: %s" % layer)
1175 def assert_ip_checksum_valid(self, received_packet,
1176 ignore_zero_checksum=False):
1177 self.assert_checksum_valid(received_packet, 'IP',
1178 ignore_zero_checksum=ignore_zero_checksum)
1180 def assert_tcp_checksum_valid(self, received_packet,
1181 ignore_zero_checksum=False):
1182 self.assert_checksum_valid(received_packet, 'TCP',
1183 ignore_zero_checksum=ignore_zero_checksum)
1185 def assert_udp_checksum_valid(self, received_packet,
1186 ignore_zero_checksum=True):
1187 self.assert_checksum_valid(received_packet, 'UDP',
1188 ignore_zero_checksum=ignore_zero_checksum)
1190 def assert_embedded_icmp_checksum_valid(self, received_packet):
1191 if received_packet.haslayer(IPerror):
1192 self.assert_checksum_valid(received_packet, 'IPerror')
1193 if received_packet.haslayer(TCPerror):
1194 self.assert_checksum_valid(received_packet, 'TCPerror')
1195 if received_packet.haslayer(UDPerror):
1196 self.assert_checksum_valid(received_packet, 'UDPerror',
1197 ignore_zero_checksum=True)
1198 if received_packet.haslayer(ICMPerror):
1199 self.assert_checksum_valid(received_packet, 'ICMPerror')
    def assert_icmp_checksum_valid(self, received_packet):
        """Verify the ICMP checksum of *received_packet* and, in addition,
        the checksums of any embedded (original) packet layers carried in
        an ICMP error message.

        :param received_packet: packet with an ICMP layer
        """
        self.assert_checksum_valid(received_packet, 'ICMP')
        self.assert_embedded_icmp_checksum_valid(received_packet)
1205 def assert_icmpv6_checksum_valid(self, pkt):
1206 if pkt.haslayer(ICMPv6DestUnreach):
1207 self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
1208 self.assert_embedded_icmp_checksum_valid(pkt)
1209 if pkt.haslayer(ICMPv6EchoRequest):
1210 self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
1211 if pkt.haslayer(ICMPv6EchoReply):
1212 self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
1214 def get_packet_counter(self, counter):
1215 if counter.startswith("/"):
1216 counter_value = self.statistics.get_counter(counter)
1218 counters = self.vapi.cli("sh errors").split('\n')
1220 for i in range(1, len(counters) - 1):
1221 results = counters[i].split()
1222 if results[1] == counter:
1223 counter_value = int(results[0])
1225 return counter_value
1227 def assert_packet_counter_equal(self, counter, expected_value):
1228 counter_value = self.get_packet_counter(counter)
1229 self.assert_equal(counter_value, expected_value,
1230 "packet counter `%s'" % counter)
1232 def assert_error_counter_equal(self, counter, expected_value):
1233 counter_value = self.statistics[counter].sum()
1234 self.assert_equal(counter_value, expected_value,
1235 "error counter `%s'" % counter)
1238 def sleep(cls, timeout, remark=None):
1240 # /* Allow sleep(0) to maintain win32 semantics, and as decreed
1241 # * by Guido, only the main thread can be interrupted.
1243 # https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892 # noqa
1246 if hasattr(os, 'sched_yield'):
1252 cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
1253 before = time.time()
1256 if after - before > 2 * timeout:
1257 cls.logger.error("unexpected self.sleep() result - "
1258 "slept for %es instead of ~%es!",
1259 after - before, timeout)
1262 "Finished sleep (%s) - slept %es (wanted %es)",
1263 remark, after - before, timeout)
    def pg_send(self, intf, pkts, worker=None, trace=True):
        """Send *pkts* on *intf* via the VPP packet generator.

        Adds the packets as a stream on the interface, arms capture on all
        packet-generator interfaces and then starts the generator — the
        order of these three calls is significant.

        :param intf: packet-generator interface to transmit on
        :param pkts: packets to enqueue as a stream
        :param worker: optional pg worker index for the stream
        :param trace: whether to enable the VPP packet trace for this run
        """
        intf.add_stream(pkts, worker=worker)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start(trace=trace)
1270 def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
1271 self.pg_send(intf, pkts)
1274 for i in self.pg_interfaces:
1275 i.get_capture(0, timeout=timeout)
1276 i.assert_nothing_captured(remark=remark)
1279 def send_and_expect(self, intf, pkts, output, n_rx=None, worker=None,
1283 self.pg_send(intf, pkts, worker=worker, trace=trace)
1284 rx = output.get_capture(n_rx)
1287 def send_and_expect_only(self, intf, pkts, output, timeout=None):
1288 self.pg_send(intf, pkts)
1289 rx = output.get_capture(len(pkts))
1293 for i in self.pg_interfaces:
1294 if i not in outputs:
1295 i.get_capture(0, timeout=timeout)
1296 i.assert_nothing_captured()
def get_testcase_doc_name(test):
    """Return the first line of the docstring of *test*'s class.

    :param test: test instance whose class carries the descriptive docstring
    :returns: first docstring line, used as the human-readable test name
    """
    return getdoc(type(test)).splitlines()[0]
1306 def get_test_description(descriptions, test):
1307 short_description = test.shortDescription()
1308 if descriptions and short_description:
1309 return short_description
class TestCaseInfo(object):
    """Bookkeeping record for a single test case run.

    Bundles the per-test logger, the temporary directory used by the test,
    the PID of the VPP process under test and the path to the VPP binary.
    ``core_crash_test`` starts out as None and is later set to the name of
    the test during which a core file was detected, if any.
    """

    def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
        self.logger = logger
        self.tempdir = tempdir
        self.vpp_pid = vpp_pid
        self.vpp_bin_path = vpp_bin_path
        # filled in by the result machinery when a core file shows up
        self.core_crash_test = None
1323 class VppTestResult(unittest.TestResult):
1325 @property result_string
1326 String variable to store the test case result string.
1328 List variable containing 2-tuples of TestCase instances and strings
1329 holding formatted tracebacks. Each tuple represents a test which
1330 raised an unexpected exception.
1332 List variable containing 2-tuples of TestCase instances and strings
1333 holding formatted tracebacks. Each tuple represents a test where
1334 a failure was explicitly signalled using the TestCase.assert*()
1338 failed_test_cases_info = set()
1339 core_crash_test_cases_info = set()
1340 current_test_case_info = None
1342 def __init__(self, stream=None, descriptions=None, verbosity=None,
1345 :param stream File descriptor to store where to report test results.
1346 Set to the standard error stream by default.
1347 :param descriptions Boolean variable to store information if to use
1348 test case descriptions.
1349 :param verbosity Integer variable to store required verbosity level.
1351 super(VppTestResult, self).__init__(stream, descriptions, verbosity)
1352 self.stream = stream
1353 self.descriptions = descriptions
1354 self.verbosity = verbosity
1355 self.result_string = None
1356 self.runner = runner
1359 def addSuccess(self, test):
1361 Record a test succeeded result
1366 if self.current_test_case_info:
1367 self.current_test_case_info.logger.debug(
1368 "--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__,
1369 test._testMethodName,
1370 test._testMethodDoc))
1371 unittest.TestResult.addSuccess(self, test)
1372 self.result_string = colorize("OK", GREEN)
1374 self.send_result_through_pipe(test, PASS)
1376 def addSkip(self, test, reason):
1378 Record a test skipped.
1384 if self.current_test_case_info:
1385 self.current_test_case_info.logger.debug(
1386 "--- addSkip() %s.%s(%s) called, reason is %s" %
1387 (test.__class__.__name__, test._testMethodName,
1388 test._testMethodDoc, reason))
1389 unittest.TestResult.addSkip(self, test, reason)
1390 self.result_string = colorize("SKIP", YELLOW)
1392 if reason == "not enough cpus":
1393 self.send_result_through_pipe(test, SKIP_CPU_SHORTAGE)
1395 self.send_result_through_pipe(test, SKIP)
1397 def symlink_failed(self):
1398 if self.current_test_case_info:
1400 failed_dir = os.getenv('FAILED_DIR')
1401 link_path = os.path.join(
1404 os.path.basename(self.current_test_case_info.tempdir))
1406 self.current_test_case_info.logger.debug(
1407 "creating a link to the failed test")
1408 self.current_test_case_info.logger.debug(
1409 "os.symlink(%s, %s)" %
1410 (self.current_test_case_info.tempdir, link_path))
1411 if os.path.exists(link_path):
1412 self.current_test_case_info.logger.debug(
1413 'symlink already exists')
1415 os.symlink(self.current_test_case_info.tempdir, link_path)
1417 except Exception as e:
1418 self.current_test_case_info.logger.error(e)
1420 def send_result_through_pipe(self, test, result):
1421 if hasattr(self, 'test_framework_result_pipe'):
1422 pipe = self.test_framework_result_pipe
1424 pipe.send((test.id(), result))
1426 def log_error(self, test, err, fn_name):
1427 if self.current_test_case_info:
1428 if isinstance(test, unittest.suite._ErrorHolder):
1429 test_name = test.description
1431 test_name = '%s.%s(%s)' % (test.__class__.__name__,
1432 test._testMethodName,
1433 test._testMethodDoc)
1434 self.current_test_case_info.logger.debug(
1435 "--- %s() %s called, err is %s" %
1436 (fn_name, test_name, err))
1437 self.current_test_case_info.logger.debug(
1438 "formatted exception is:\n%s" %
1439 "".join(format_exception(*err)))
1441 def add_error(self, test, err, unittest_fn, error_type):
1442 if error_type == FAIL:
1443 self.log_error(test, err, 'addFailure')
1444 error_type_str = colorize("FAIL", RED)
1445 elif error_type == ERROR:
1446 self.log_error(test, err, 'addError')
1447 error_type_str = colorize("ERROR", RED)
1449 raise Exception('Error type %s cannot be used to record an '
1450 'error or a failure' % error_type)
1452 unittest_fn(self, test, err)
1453 if self.current_test_case_info:
1454 self.result_string = "%s [ temp dir used by test case: %s ]" % \
1456 self.current_test_case_info.tempdir)
1457 self.symlink_failed()
1458 self.failed_test_cases_info.add(self.current_test_case_info)
1459 if is_core_present(self.current_test_case_info.tempdir):
1460 if not self.current_test_case_info.core_crash_test:
1461 if isinstance(test, unittest.suite._ErrorHolder):
1462 test_name = str(test)
1464 test_name = "'{!s}' ({!s})".format(
1465 get_testcase_doc_name(test), test.id())
1466 self.current_test_case_info.core_crash_test = test_name
1467 self.core_crash_test_cases_info.add(
1468 self.current_test_case_info)
1470 self.result_string = '%s [no temp dir]' % error_type_str
1472 self.send_result_through_pipe(test, error_type)
1474 def addFailure(self, test, err):
1476 Record a test failed result
1479 :param err: error message
1482 self.add_error(test, err, unittest.TestResult.addFailure, FAIL)
1484 def addError(self, test, err):
1486 Record a test error result
1489 :param err: error message
1492 self.add_error(test, err, unittest.TestResult.addError, ERROR)
1494 def getDescription(self, test):
1496 Get test description
1499 :returns: test description
1502 return get_test_description(self.descriptions, test)
1504 def startTest(self, test):
1512 def print_header(test):
1513 if test.__class__ in self.printed:
1516 test_doc = getdoc(test)
1518 raise Exception("No doc string for test '%s'" % test.id())
1520 test_title = test_doc.splitlines()[0]
1521 test_title = colorize(test_title, GREEN)
1522 if test.is_tagged_run_solo():
1523 test_title = colorize(f"SOLO RUN: {test_title}", YELLOW)
1525 # This block may overwrite the colorized title above,
1526 # but we want this to stand out and be fixed
1527 if test.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
1528 test_title = colorize(
1529 f"FIXME with VPP workers: {test_title}", RED)
1531 if test.__class__.skipped_due_to_cpu_lack:
1532 test_title = colorize(
1533 f"{test_title} [skipped - not enough cpus, "
1534 f"required={test.__class__.get_cpus_required()}, "
1535 f"available={max_vpp_cpus}]", YELLOW)
1537 print(double_line_delim)
1539 print(double_line_delim)
1540 self.printed.append(test.__class__)
1543 self.start_test = time.time()
1544 unittest.TestResult.startTest(self, test)
1545 if self.verbosity > 0:
1546 self.stream.writeln(
1547 "Starting " + self.getDescription(test) + " ...")
1548 self.stream.writeln(single_line_delim)
1550 def stopTest(self, test):
1552 Called when the given test has been run
1557 unittest.TestResult.stopTest(self, test)
1559 if self.verbosity > 0:
1560 self.stream.writeln(single_line_delim)
1561 self.stream.writeln("%-73s%s" % (self.getDescription(test),
1562 self.result_string))
1563 self.stream.writeln(single_line_delim)
1565 self.stream.writeln("%-68s %4.2f %s" %
1566 (self.getDescription(test),
1567 time.time() - self.start_test,
1568 self.result_string))
1570 self.send_result_through_pipe(test, TEST_RUN)
1572 def printErrors(self):
1574 Print errors from running the test case
1576 if len(self.errors) > 0 or len(self.failures) > 0:
1577 self.stream.writeln()
1578 self.printErrorList('ERROR', self.errors)
1579 self.printErrorList('FAIL', self.failures)
1581 # ^^ that is the last output from unittest before summary
1582 if not self.runner.print_summary:
1583 devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
1584 self.stream = devnull
1585 self.runner.stream = devnull
1587 def printErrorList(self, flavour, errors):
1589 Print error list to the output stream together with error type
1590 and test case description.
1592 :param flavour: error type
1593 :param errors: iterable errors
1596 for test, err in errors:
1597 self.stream.writeln(double_line_delim)
1598 self.stream.writeln("%s: %s" %
1599 (flavour, self.getDescription(test)))
1600 self.stream.writeln(single_line_delim)
1601 self.stream.writeln("%s" % err)
1604 class VppTestRunner(unittest.TextTestRunner):
1606 A basic test runner implementation which prints results to standard error.
1610 def resultclass(self):
1611 """Class maintaining the results of the tests"""
1612 return VppTestResult
1614 def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
1615 result_pipe=None, failfast=False, buffer=False,
1616 resultclass=None, print_summary=True, **kwargs):
1617 # ignore stream setting here, use hard-coded stdout to be in sync
1618 # with prints from VppTestCase methods ...
1619 super(VppTestRunner, self).__init__(sys.stdout, descriptions,
1620 verbosity, failfast, buffer,
1621 resultclass, **kwargs)
1622 KeepAliveReporter.pipe = keep_alive_pipe
1624 self.orig_stream = self.stream
1625 self.resultclass.test_framework_result_pipe = result_pipe
1627 self.print_summary = print_summary
1629 def _makeResult(self):
1630 return self.resultclass(self.stream,
1635 def run(self, test):
1642 faulthandler.enable() # emit stack trace to stderr if killed by signal
1644 result = super(VppTestRunner, self).run(test)
1645 if not self.print_summary:
1646 self.stream = self.orig_stream
1647 result.stream = self.orig_stream
1651 class Worker(Thread):
1652 def __init__(self, executable_args, logger, env=None, *args, **kwargs):
1653 super(Worker, self).__init__(*args, **kwargs)
1654 self.logger = logger
1655 self.args = executable_args
1656 if hasattr(self, 'testcase') and self.testcase.debug_all:
1657 if self.testcase.debug_gdbserver:
1658 self.args = ['/usr/bin/gdbserver', 'localhost:{port}'
1659 .format(port=self.testcase.gdbserver_port)] + args
1660 elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'):
1661 self.args.append(self.wait_for_gdb)
1662 self.app_bin = executable_args[0]
1663 self.app_name = os.path.basename(self.app_bin)
1664 if hasattr(self, 'role'):
1665 self.app_name += ' {role}'.format(role=self.role)
1668 env = {} if env is None else env
1669 self.env = copy.deepcopy(env)
1671 def wait_for_enter(self):
1672 if not hasattr(self, 'testcase'):
1674 if self.testcase.debug_all and self.testcase.debug_gdbserver:
1676 print(double_line_delim)
1677 print("Spawned GDB Server for '{app}' with PID: {pid}"
1678 .format(app=self.app_name, pid=self.process.pid))
1679 elif self.testcase.debug_all and self.testcase.debug_gdb:
1681 print(double_line_delim)
1682 print("Spawned '{app}' with PID: {pid}"
1683 .format(app=self.app_name, pid=self.process.pid))
1686 print(single_line_delim)
1687 print("You can debug '{app}' using:".format(app=self.app_name))
1688 if self.testcase.debug_gdbserver:
1689 print("sudo gdb " + self.app_bin +
1690 " -ex 'target remote localhost:{port}'"
1691 .format(port=self.testcase.gdbserver_port))
1692 print("Now is the time to attach gdb by running the above "
1693 "command, set up breakpoints etc., then resume from "
1694 "within gdb by issuing the 'continue' command")
1695 self.testcase.gdbserver_port += 1
1696 elif self.testcase.debug_gdb:
1697 print("sudo gdb " + self.app_bin +
1698 " -ex 'attach {pid}'".format(pid=self.process.pid))
1699 print("Now is the time to attach gdb by running the above "
1700 "command and set up breakpoints etc., then resume from"
1701 " within gdb by issuing the 'continue' command")
1702 print(single_line_delim)
1703 input("Press ENTER to continue running the testcase...")
1706 executable = self.args[0]
1707 if not os.path.exists(executable) or not os.access(
1708 executable, os.F_OK | os.X_OK):
1709 # Exit code that means some system file did not exist,
1710 # could not be opened, or had some other kind of error.
1711 self.result = os.EX_OSFILE
1712 raise EnvironmentError(
1713 "executable '%s' is not found or executable." % executable)
1714 self.logger.debug("Running executable: '{app}'"
1715 .format(app=' '.join(self.args)))
1716 env = os.environ.copy()
1717 env.update(self.env)
1718 env["CK_LOG_FILE_NAME"] = "-"
1719 self.process = subprocess.Popen(
1720 self.args, shell=False, env=env, preexec_fn=os.setpgrp,
1721 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1722 self.wait_for_enter()
1723 out, err = self.process.communicate()
1724 self.logger.debug("Finished running `{app}'".format(app=self.app_name))
1725 self.logger.info("Return code is `%s'" % self.process.returncode)
1726 self.logger.info(single_line_delim)
1727 self.logger.info("Executable `{app}' wrote to stdout:"
1728 .format(app=self.app_name))
1729 self.logger.info(single_line_delim)
1730 self.logger.info(out.decode('utf-8'))
1731 self.logger.info(single_line_delim)
1732 self.logger.info("Executable `{app}' wrote to stderr:"
1733 .format(app=self.app_name))
1734 self.logger.info(single_line_delim)
1735 self.logger.info(err.decode('utf-8'))
1736 self.logger.info(single_line_delim)
1737 self.result = self.process.returncode
1740 if __name__ == '__main__':