3 from __future__ import print_function
19 from collections import deque
20 from threading import Thread, Event
21 from inspect import getdoc, isclass
22 from traceback import format_exception
23 from logging import FileHandler, DEBUG, Formatter
27 from scapy.packet import Raw
28 import hook as hookmodule
29 from vpp_pg_interface import VppPGInterface
30 from vpp_sub_interface import VppSubInterface
31 from vpp_lo_interface import VppLoInterface
32 from vpp_bvi_interface import VppBviInterface
33 from vpp_papi_provider import VppPapiProvider
35 from vpp_papi.vpp_stats import VPPStats
36 from vpp_papi.vpp_transport_socket import VppTransportSocketIOError
37 from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
39 from vpp_object import VppObjectRegistry
40 from util import ppp, is_core_present
41 from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
42 from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
43 from scapy.layers.inet6 import ICMPv6EchoReply
# NOTE(review): excerpt with embedded original line numbers; comments only.
# Module-level logger, plus a no-op "null" logger test cases can override.
45 logger = logging.getLogger(__name__)
47 # Set up an empty logger for the testcase that can be overridden as necessary
48 null_logger = logging.getLogger('VppTestCase')
49 null_logger.addHandler(logging.NullHandler())
# NOTE(review): fragmentary excerpt - the embedded original line numbers
# (58, 60, 61, ...) show gaps, so some statements are missing here.
# Wrapper whose truthiness reflects an environment variable: the visible
# comparison tests os.getenv(name, default).lower() against true_values.
58 class BoolEnvironmentVariable(object):
60     def __init__(self, env_var_name, default='n', true_values=None):
61 self.name = env_var_name
62 self.default = default
# The default true_values list is on a continuation line not visible here.
63 self.true_values = true_values if true_values is not None else \
# Truth test: current env value (lowercased) must be one of true_values.
67 return os.getenv(self.name, self.default).lower() in self.true_values
# Python 2 truth protocol uses __nonzero__; alias it to the py3 __bool__.
69 if sys.version_info[0] == 2:
70 __nonzero__ = __bool__
# repr showing constructor arguments (the def __repr__ line is not visible).
73 return 'BoolEnvironmentVariable(%r, default=%r, true_values=%r)' % \
74 (self.name, self.default, self.true_values)
# Module-wide debug switch driven by the TEST_DEBUG environment variable.
77 debug_framework = BoolEnvironmentVariable('TEST_DEBUG')
# NOTE(review): the lines below appear to belong to a module docstring in the
# original file; the surrounding triple quotes are not visible in this excerpt.
82 Test framework module.
84 The module provides a set of tools for constructing and running tests and
85 representing the results.
# NOTE(review): fragmentary excerpt - embedded line numbers show gaps.
# Exception raised when the VPP subprocess terminates unexpectedly; a
# negative return code is translated into the corresponding signal name.
89 class VppDiedError(Exception):
90 """ exception for reporting that the subprocess has died."""
# Map of signal number -> signal name (skips SIG_* aliases like SIG_DFL).
92 signals_by_value = {v: k for k, v in signal.__dict__.items() if
93 k.startswith('SIG') and not k.startswith('SIG_')}
95 def __init__(self, rv=None, testcase=None, method_name=None):
97 self.signal_name = None
98 self.testcase = testcase
99 self.method_name = method_name
# A negative rv encodes "killed by signal -rv" (subprocess convention).
102 self.signal_name = VppDiedError.signals_by_value[-rv]
103 except (KeyError, TypeError):
# Build a context suffix naming the testcase/method when available.
106 if testcase is None and method_name is None:
109 in_msg = ' while running %s.%s' % (testcase, method_name)
112 msg = "VPP subprocess died unexpectedly%s with return code: %d%s."\
113 % (in_msg, self.rv, ' [%s]' %
115 self.signal_name is not None else ''))
117 msg = "VPP subprocess died unexpectedly%s." % in_msg
119 super(VppDiedError, self).__init__(msg)
# NOTE(review): fragmentary excerpt - the attribute assignments following
# each #: comment are not visible here (gaps in embedded line numbers).
122 class _PacketInfo(object):
123 """Private class to create packet info object.
125 Help process information about the next packet.
126 Set variables to default values.
128 #: Store the index of the packet.
130 #: Store the index of the source packet generator interface of the packet.
132 #: Store the index of the destination packet generator interface
135 #: Store expected ip version
137 #: Store expected upper protocol
139 #: Store the copy of the former packet.
def __eq__(self, other):
    """Packet infos are equal when index, endpoints and payload all match."""
    return (self.index == other.index
            and self.src == other.src
            and self.dst == other.dst
            and self.data == other.data)
# NOTE(review): fragmentary excerpt - embedded line numbers show gaps
# (e.g. the select() remaining args and the 'limit' computation are missing).
# Thread body: drains the vpp subprocess stdout/stderr via select()/os.read()
# into per-class deques, re-joining lines split across read boundaries.
150 def pump_output(testclass):
151 """ pump output from vpp stdout/stderr to proper queues """
# Loop until the test class signals shutdown via its stop-flag Event.
154 while not testclass.pump_thread_stop_flag.is_set():
155 readable = select.select([testclass.vpp.stdout.fileno(),
156 testclass.vpp.stderr.fileno(),
157 testclass.pump_thread_wakeup_pipe[0]],
159 if testclass.vpp.stdout.fileno() in readable:
160 read = os.read(testclass.vpp.stdout.fileno(), 102400)
162 split = read.decode('ascii',
163 errors='backslashreplace').splitlines(True)
# Prepend the partial line left over from the previous read, if any.
164 if len(stdout_fragment) > 0:
165 split[0] = "%s%s" % (stdout_fragment, split[0])
166 if len(split) > 0 and split[-1].endswith("\n"):
# An incomplete trailing line is carried over to the next read.
170 stdout_fragment = split[-1]
171 testclass.vpp_stdout_deque.extend(split[:limit])
172 if not testclass.cache_vpp_output:
173 for line in split[:limit]:
174 testclass.logger.info(
175 "VPP STDOUT: %s" % line.rstrip("\n"))
# Same handling for stderr, logged at error level instead of info.
176 if testclass.vpp.stderr.fileno() in readable:
177 read = os.read(testclass.vpp.stderr.fileno(), 102400)
179 split = read.decode('ascii',
180 errors='backslashreplace').splitlines(True)
181 if len(stderr_fragment) > 0:
182 split[0] = "%s%s" % (stderr_fragment, split[0])
183 if len(split) > 0 and split[-1].endswith("\n"):
187 stderr_fragment = split[-1]
189 testclass.vpp_stderr_deque.extend(split[:limit])
190 if not testclass.cache_vpp_output:
191 for line in split[:limit]:
192 testclass.logger.error(
193 "VPP STDERR: %s" % line.rstrip("\n"))
194 # ignoring the dummy pipe here intentionally - the
195 # flag will take care of properly terminating the loop
# Returns a BoolEnvironmentVariable (truthy wrapper), not a plain bool,
# so the flag is re-evaluated from the environment on each truth test.
198 def _is_skip_aarch64_set():
199 return BoolEnvironmentVariable('SKIP_AARCH64')
202 is_skip_aarch64_set = _is_skip_aarch64_set()
def _is_platform_aarch64():
    """Return True when the host CPU architecture reports as aarch64."""
    return 'aarch64' == platform.machine()


# Evaluated once at import time; the architecture cannot change at runtime.
is_platform_aarch64 = _is_platform_aarch64()
# Both helpers return BoolEnvironmentVariable wrappers (truthy objects),
# evaluated lazily against the environment on each truth test.
212 def _running_extended_tests():
213 return BoolEnvironmentVariable("EXTENDED_TESTS")
216 running_extended_tests = _running_extended_tests()
219 def _running_gcov_tests():
220 return BoolEnvironmentVariable("GCOV_TESTS")
223 running_gcov_tests = _running_gcov_tests()
# NOTE(review): fragmentary excerpt - embedded line numbers show gaps
# (the _shared_state declaration and the pipe property getter are missing).
# Borg-style singleton: instances share state via a class-level dict, and
# report liveness to a parent (forked) test-runner process over a pipe.
226 class KeepAliveReporter(object):
228 Singleton object which reports test start to parent process
# Borg pattern: all instances alias the same shared __dict__.
233 self.__dict__ = self._shared_state
241 def pipe(self, pipe):
242 if self._pipe is not None:
243 raise Exception("Internal error - pipe should only be set once.")
246 def send_keep_alive(self, test, desc=None):
248 Write current test tmpdir & desc to keep-alive pipe to signal liveness
# No pipe means the test is not running forked - nothing to report.
250 if self.pipe is None:
251 # if not running forked..
255 desc = '%s (%s)' % (desc, unittest.util.strclass(test))
259 self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
# NOTE(review): fragmentary excerpt - RUN_SOLO = 1 and parts of the
# decorator body are not visible (gaps in embedded line numbers).
262 class TestCaseTag(Enum):
263 # marks the suites that must run at the end
264 # using only a single test runner
266 # marks the suites broken on VPP multi-worker
267 FIXME_VPP_WORKERS = 2
# Class decorator factory: appends tag `e` to the class's test_tags list
# (creating it on AttributeError, per the visible except clause).
270 def create_tag_decorator(e):
273 cls.test_tags.append(e)
274 except AttributeError:
280 tag_run_solo = create_tag_decorator(TestCaseTag.RUN_SOLO)
281 tag_fixme_vpp_workers = create_tag_decorator(TestCaseTag.FIXME_VPP_WORKERS)
# NOTE(review): fragmentary excerpt of the VppTestCase class head and its
# small classmethods - embedded line numbers show gaps throughout; decorators
# (@property/@classmethod/@staticmethod) are largely not visible.
284 class VppTestCase(unittest.TestCase):
285 """This subclass is a base class for VPP test cases that are implemented as
286 classes. It provides methods to create and run test case.
# Class-level defaults that subclasses may override.
289 extra_vpp_punt_config = []
290 extra_vpp_plugin_config = []
292 vapi_response_timeout = 5
295 def packet_infos(self):
296 """List of packet infos"""
297 return self._packet_infos
300 def get_packet_count_for_if_idx(cls, dst_if_index):
301 """Get the number of packet info for specified destination if index"""
302 if dst_if_index in cls._packet_count_for_dst_if_idx:
303 return cls._packet_count_for_dst_if_idx[dst_if_index]
308 def has_tag(cls, tag):
309 """ if the test case has a given tag - return true """
311 return tag in cls.test_tags
312 except AttributeError:
317 def is_tagged_run_solo(cls):
318 """ if the test case class is timing-sensitive - return true """
319 return cls.has_tag(TestCaseTag.RUN_SOLO)
# The def line for this accessor (presumably `instance(cls)`) is missing.
323 """Return the instance of this testcase"""
324 return cls.test_instance
# Parses the DEBUG env value `d` into the per-class debug_* booleans.
327 def set_debug_flags(cls, d):
328 cls.gdbserver_port = 7777
329 cls.debug_core = False
330 cls.debug_gdb = False
331 cls.debug_gdbserver = False
332 cls.debug_all = False
337 cls.debug_core = True
338 elif dl == "gdb" or dl == "gdb-all":
340 elif dl == "gdbserver" or dl == "gdbserver-all":
341 cls.debug_gdbserver = True
343 raise Exception("Unrecognized DEBUG option: '%s'" % d)
344 if dl == "gdb-all" or dl == "gdbserver-all":
# Heuristic CPU picker: buckets CPUs by how many vpp_main processes are
# pinned to them (via psutil), then chooses randomly from the least-used set.
348 def get_least_used_cpu():
349 cpu_usage_list = [set(range(psutil.cpu_count()))]
350 vpp_processes = [p for p in psutil.process_iter(attrs=['pid', 'name'])
351 if 'vpp_main' == p.info['name']]
352 for vpp_process in vpp_processes:
353 for cpu_usage_set in cpu_usage_list:
355 cpu_num = vpp_process.cpu_num()
356 if cpu_num in cpu_usage_set:
357 cpu_usage_set_index = cpu_usage_list.index(
359 if cpu_usage_set_index == len(cpu_usage_list) - 1:
360 cpu_usage_list.append({cpu_num})
362 cpu_usage_list[cpu_usage_set_index + 1].add(
364 cpu_usage_set.remove(cpu_num)
# Process may have exited between enumeration and inspection - ignore it.
366 except psutil.NoSuchProcess:
369 for cpu_usage_set in cpu_usage_list:
370 if len(cpu_usage_set) > 0:
371 min_usage_set = cpu_usage_set
374 return random.choice(tuple(min_usage_set))
# NOTE(review): fragmentary excerpt - embedded line numbers show gaps
# (several else-branches and the vpp_cmdline list opening are missing).
# Builds the per-class configuration (paths, worker count, debug flags)
# and assembles the full vpp command line from environment variables.
377 def setUpConstants(cls):
378 """ Set-up the test case class based on environment variables """
379 cls.step = BoolEnvironmentVariable('STEP')
380 d = os.getenv("DEBUG", None)
381 # inverted case to handle '' == True
382 c = os.getenv("CACHE_OUTPUT", "1")
383 cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
384 cls.set_debug_flags(d)
385 cls.vpp_bin = os.getenv('VPP_BIN', "vpp")
386 cls.plugin_path = os.getenv('VPP_PLUGIN_PATH')
387 cls.test_plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
388 cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
# Combine built-in and external plugin paths with a ':' separator.
390 if cls.plugin_path is not None:
391 if cls.extern_plugin_path is not None:
392 plugin_path = "%s:%s" % (
393 cls.plugin_path, cls.extern_plugin_path)
395 plugin_path = cls.plugin_path
396 elif cls.extern_plugin_path is not None:
397 plugin_path = cls.extern_plugin_path
# Interactive debugging needs the CLI reachable over TCP.
399 if cls.step or cls.debug_gdb or cls.debug_gdbserver:
400 debug_cli = "cli-listen localhost:5002"
402 size = os.getenv("COREDUMP_SIZE")
404 coredump_size = "coredump-size %s" % size
405 if coredump_size is None:
406 coredump_size = "coredump-size unlimited"
408 cpu_core_number = cls.get_least_used_cpu()
409 if not hasattr(cls, "vpp_worker_count"):
410 cls.vpp_worker_count = 0
# VPP_WORKER_CONFIG is expected to look like "workers N".
411 worker_config = os.getenv("VPP_WORKER_CONFIG", "")
413 elems = worker_config.split(" ")
414 if elems[0] != "workers" or len(elems) != 2:
415 raise ValueError("Wrong VPP_WORKER_CONFIG == '%s' value." %
417 cls.vpp_worker_count = int(elems[1])
# Suites tagged FIXME_VPP_WORKERS are forced back to zero workers.
418 if cls.vpp_worker_count > 0 and\
419 cls.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
420 cls.vpp_worker_count = 0
422 default_variant = os.getenv("VARIANT")
423 if default_variant is not None:
424 default_variant = "defaults { %s 100 }" % default_variant
428 api_fuzzing = os.getenv("API_FUZZ")
429 if api_fuzzing is None:
# Start of the vpp command line (the list opening is not visible here).
434 "unix", "{", "nodaemon", debug_cli, "full-coredump",
435 coredump_size, "runtime-dir", cls.tempdir, "}",
436 "api-trace", "{", "on", "}",
437 "api-segment", "{", "prefix", cls.shm_prefix, "}",
438 "cpu", "{", "main-core", str(cpu_core_number), ]
439 if cls.vpp_worker_count:
440 cls.vpp_cmdline.extend(["workers", str(cls.vpp_worker_count)])
441 cls.vpp_cmdline.extend([
443 "physmem", "{", "max-size", "32m", "}",
444 "statseg", "{", "socket-name", cls.stats_sock, "}",
445 "socksvr", "{", "socket-name", cls.api_sock, "}",
446 "node { ", default_variant, "}",
447 "api-fuzz {", api_fuzzing, "}",
448 "plugins", "{", "plugin", "dpdk_plugin.so", "{", "disable", "}",
449 "plugin", "rdma_plugin.so", "{", "disable", "}",
450 "plugin", "lisp_unittest_plugin.so", "{", "enable", "}",
451 "plugin", "unittest_plugin.so", "{", "enable", "}"
452 ] + cls.extra_vpp_plugin_config + ["}", ])
454 if cls.extra_vpp_punt_config is not None:
455 cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
456 if plugin_path is not None:
457 cls.vpp_cmdline.extend(["plugin_path", plugin_path])
458 if cls.test_plugin_path is not None:
459 cls.vpp_cmdline.extend(["test_plugin_path", cls.test_plugin_path])
461 cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
462 cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
# NOTE(review): fragmentary excerpt covering wait_for_enter, the vpp spawn
# helper (its def line is missing) and wait_for_coredump - gaps throughout.
# Interactive pause so the user can attach gdb/gdbserver before continuing.
465 def wait_for_enter(cls):
466 if cls.debug_gdbserver:
467 print(double_line_delim)
468 print("Spawned GDB server with PID: %d" % cls.vpp.pid)
470 print(double_line_delim)
471 print("Spawned VPP with PID: %d" % cls.vpp.pid)
473 cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
475 print(single_line_delim)
476 print("You can debug VPP using:")
477 if cls.debug_gdbserver:
478 print("sudo gdb " + cls.vpp_bin +
479 " -ex 'target remote localhost:{port}'"
480 .format(port=cls.gdbserver_port))
481 print("Now is the time to attach gdb by running the above "
482 "command, set up breakpoints etc., then resume VPP from "
483 "within gdb by issuing the 'continue' command")
# Bump the port so a subsequent gdbserver instance does not collide.
484 cls.gdbserver_port += 1
486 print("sudo gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
487 print("Now is the time to attach gdb by running the above "
488 "command and set up breakpoints etc., then resume VPP from"
489 " within gdb by issuing the 'continue' command")
490 print(single_line_delim)
491 input("Press ENTER to continue running the testcase...")
# Spawn helper (def line not visible): optionally wraps the vpp command
# line in gdbserver, then starts the subprocess with piped stdout/stderr.
495 cmdline = cls.vpp_cmdline
497 if cls.debug_gdbserver:
498 gdbserver = '/usr/bin/gdbserver'
499 if not os.path.isfile(gdbserver) or \
500 not os.access(gdbserver, os.X_OK):
501 raise Exception("gdbserver binary '%s' does not exist or is "
502 "not executable" % gdbserver)
504 cmdline = [gdbserver, 'localhost:{port}'
505 .format(port=cls.gdbserver_port)] + cls.vpp_cmdline
506 cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
509 cls.vpp = subprocess.Popen(cmdline,
510 stdout=subprocess.PIPE,
511 stderr=subprocess.PIPE)
512 except subprocess.CalledProcessError as e:
513 cls.logger.critical("Subprocess returned with non-0 return code: ("
517 cls.logger.critical("Subprocess returned with OS error: "
518 "(%s) %s", e.errno, e.strerror)
520 except Exception as e:
521 cls.logger.exception("Subprocess returned unexpected from "
# Polls the core file size (60s deadline) until it stops growing, so the
# kernel has finished writing the dump before the test run tears down.
528 def wait_for_coredump(cls):
529 corefile = cls.tempdir + "/core"
530 if os.path.isfile(corefile):
531 cls.logger.error("Waiting for coredump to complete: %s", corefile)
532 curr_size = os.path.getsize(corefile)
533 deadline = time.time() + 60
535 while time.time() < deadline:
538 curr_size = os.path.getsize(corefile)
539 if size == curr_size:
543 cls.logger.error("Timed out waiting for coredump to complete:"
546 cls.logger.error("Coredump complete: %s, size %d",
# NOTE(review): fragmentary excerpt of setUpClass (its def line and docstring
# opening are missing) plus _debug_quit - gaps in embedded line numbers.
552 Perform class setup before running the testcase
553 Remove shared memory files, start vpp and connect the vpp-api
555 super(VppTestCase, cls).setUpClass()
556 gc.collect() # run garbage collection first
557 cls.logger = get_logger(cls.__name__)
# RND_SEED must be present in the environment (KeyError otherwise).
558 seed = os.environ["RND_SEED"]
560 if hasattr(cls, 'parallel_handler'):
561 cls.logger.addHandler(cls.parallel_handler)
562 cls.logger.propagate = False
# Per-testcase temp dir holds the stats/api sockets and the log file.
564 cls.tempdir = tempfile.mkdtemp(
565 prefix='vpp-unittest-%s-' % cls.__name__)
566 cls.stats_sock = "%s/stats.sock" % cls.tempdir
567 cls.api_sock = "%s/api.sock" % cls.tempdir
568 cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
569 cls.file_handler.setFormatter(
570 Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
572 cls.file_handler.setLevel(DEBUG)
573 cls.logger.addHandler(cls.file_handler)
574 cls.logger.debug("--- setUpClass() for %s called ---" %
576 cls.shm_prefix = os.path.basename(cls.tempdir) # Only used for VAPI
577 os.chdir(cls.tempdir)
578 cls.logger.info("Temporary dir is %s, api socket is %s",
579 cls.tempdir, cls.api_sock)
580 cls.logger.debug("Random seed is %s" % seed)
582 cls.reset_packet_infos()
584 cls._old_captures = []
587 cls.registry = VppObjectRegistry()
588 cls.vpp_startup_failed = False
589 cls.reporter = KeepAliveReporter()
590 # need to catch exceptions here because if we raise, then the cleanup
591 # doesn't get called and we might end with a zombie vpp
594 cls.reporter.send_keep_alive(cls, 'setUpClass')
595 VppTestResult.current_test_case_info = TestCaseInfo(
596 cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
# Output plumbing: deques filled by the pump_output daemon thread.
597 cls.vpp_stdout_deque = deque()
598 cls.vpp_stderr_deque = deque()
599 cls.pump_thread_stop_flag = Event()
600 cls.pump_thread_wakeup_pipe = os.pipe()
601 cls.pump_thread = Thread(target=pump_output, args=(cls,))
602 cls.pump_thread.daemon = True
603 cls.pump_thread.start()
# When a debugger is attached, do not time out waiting for API replies.
604 if cls.debug_gdb or cls.debug_gdbserver:
605 cls.vapi_response_timeout = 0
606 cls.vapi = VppPapiProvider(cls.__name__, cls,
607 cls.vapi_response_timeout)
609 hook = hookmodule.StepHook(cls)
611 hook = hookmodule.PollHook(cls)
612 cls.vapi.register_hook(hook)
613 cls.statistics = VPPStats(socketname=cls.stats_sock)
617 cls.vpp_startup_failed = True
619 "VPP died shortly after startup, check the"
620 " output to standard error for possible cause")
624 except (vpp_papi.VPPIOError, Exception) as e:
625 cls.logger.debug("Exception connecting to vapi: %s" % e)
626 cls.vapi.disconnect()
628 if cls.debug_gdbserver:
629 print(colorize("You're running VPP inside gdbserver but "
630 "VPP-API connection failed, did you forget "
631 "to 'continue' VPP from within gdb?", RED))
633 except vpp_papi.VPPRuntimeError as e:
634 cls.logger.debug("%s" % e)
637 except Exception as e:
638 cls.logger.debug("Exception connecting to VPP: %s" % e)
# In interactive debug modes, pause before killing a still-running VPP.
643 def _debug_quit(cls):
644 if (cls.debug_gdbserver or cls.debug_gdb):
648 if cls.vpp.returncode is None:
650 print(double_line_delim)
651 print("VPP or GDB server is still running")
652 print(single_line_delim)
653 input("When done debugging, press ENTER to kill the "
654 "process and finish running the testcase...")
655 except AttributeError:
# NOTE(review): fragmentary excerpt of the quit() classmethod (def line and
# docstring opening missing) and tearDownClass - gaps throughout.
661 Disconnect vpp-api, kill vpp and cleanup shared memory files
665 # first signal that we want to stop the pump thread, then wake it up
666 if hasattr(cls, 'pump_thread_stop_flag'):
667 cls.pump_thread_stop_flag.set()
# Any write unblocks the pump thread's select(); payload is irrelevant.
668 if hasattr(cls, 'pump_thread_wakeup_pipe'):
669 os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up')
670 if hasattr(cls, 'pump_thread'):
671 cls.logger.debug("Waiting for pump thread to stop")
672 cls.pump_thread.join()
673 if hasattr(cls, 'vpp_stderr_reader_thread'):
674 cls.logger.debug("Waiting for stderr pump to stop")
675 cls.vpp_stderr_reader_thread.join()
677 if hasattr(cls, 'vpp'):
678 if hasattr(cls, 'vapi'):
679 cls.logger.debug(cls.vapi.vpp.get_stats())
680 cls.logger.debug("Disconnecting class vapi client on %s",
682 cls.vapi.disconnect()
683 cls.logger.debug("Deleting class vapi attribute on %s",
# Still-running vpp: wait for any coredump, then terminate it.
687 if cls.vpp.returncode is None:
688 cls.wait_for_coredump()
689 cls.logger.debug("Sending TERM to vpp")
691 cls.logger.debug("Waiting for vpp to die")
693 outs, errs = cls.vpp.communicate(timeout=5)
694 except subprocess.TimeoutExpired:
696 outs, errs = cls.vpp.communicate()
697 cls.logger.debug("Deleting class vpp attribute on %s",
699 cls.vpp.stdout.close()
700 cls.vpp.stderr.close()
# Escalate stderr to critical when vpp failed during startup.
703 if cls.vpp_startup_failed:
704 stdout_log = cls.logger.info
705 stderr_log = cls.logger.critical
707 stdout_log = cls.logger.info
708 stderr_log = cls.logger.info
710 if hasattr(cls, 'vpp_stdout_deque'):
711 stdout_log(single_line_delim)
712 stdout_log('VPP output to stdout while running %s:', cls.__name__)
713 stdout_log(single_line_delim)
714 vpp_output = "".join(cls.vpp_stdout_deque)
715 with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
717 stdout_log('\n%s', vpp_output)
718 stdout_log(single_line_delim)
720 if hasattr(cls, 'vpp_stderr_deque'):
721 stderr_log(single_line_delim)
722 stderr_log('VPP output to stderr while running %s:', cls.__name__)
723 stderr_log(single_line_delim)
724 vpp_output = "".join(cls.vpp_stderr_deque)
725 with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
727 stderr_log('\n%s', vpp_output)
728 stderr_log(single_line_delim)
731 def tearDownClass(cls):
732 """ Perform final cleanup after running all tests in this test-case """
733 cls.logger.debug("--- tearDownClass() for %s called ---" %
735 cls.reporter.send_keep_alive(cls, 'tearDownClass')
737 cls.file_handler.close()
738 cls.reset_packet_infos()
740 debug_internal.on_tear_down_class(cls)
def show_commands_at_teardown(self):
    """Hook point: subclasses override to log extra show-command output."""
    message = "--- No test specific show commands provided. ---"
    self.logger.info(message)
# NOTE(review): fragmentary excerpt of tearDown - its def line (and parts of
# the try/finally structure) are not visible; gaps in embedded line numbers.
747 """ Show various debug prints after each test """
748 self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
749 (self.__class__.__name__, self._testMethodName,
750 self._testMethodDoc))
# Only query VPP for diagnostics if it is still alive.
753 if not self.vpp_dead:
754 self.logger.debug(self.vapi.cli("show trace max 1000"))
755 self.logger.info(self.vapi.ppcli("show interface"))
756 self.logger.info(self.vapi.ppcli("show hardware"))
757 self.logger.info(self.statistics.set_errors_str())
758 self.logger.info(self.vapi.ppcli("show run"))
759 self.logger.info(self.vapi.ppcli("show log"))
760 self.logger.info(self.vapi.ppcli("show bihash"))
761 self.logger.info("Logging testcase specific show commands.")
762 self.show_commands_at_teardown()
763 self.registry.remove_vpp_config(self.logger)
764 # Save/Dump VPP api trace log
765 m = self._testMethodName
766 api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
767 tmp_api_trace = "/tmp/%s" % api_trace
768 vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
769 self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
# VPP writes the trace under /tmp; move it into the testcase tempdir.
770 self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
772 os.rename(tmp_api_trace, vpp_api_trace_log)
773 self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
775 except VppTransportSocketIOError:
776 self.logger.debug("VppTransportSocketIOError: Vpp dead. "
777 "Cannot log show commands.")
780 self.registry.unregister_all(self.logger)
# NOTE(review): fragmentary excerpt of setUp - its def line and the
# vpp-death check preceding the raise are not visible here.
783 """ Clear trace before running each test"""
784 super(VppTestCase, self).setUp()
785 self.reporter.send_keep_alive(self)
# Raised when the visible (missing) check detects vpp already died.
787 raise VppDiedError(rv=None, testcase=self.__class__.__name__,
788 method_name=self._testMethodName)
789 self.sleep(.1, "during setUp")
# Marker lines make per-test boundaries visible in captured vpp output.
790 self.vpp_stdout_deque.append(
791 "--- test setUp() for %s.%s(%s) starts here ---\n" %
792 (self.__class__.__name__, self._testMethodName,
793 self._testMethodDoc))
794 self.vpp_stderr_deque.append(
795 "--- test setUp() for %s.%s(%s) starts here ---\n" %
796 (self.__class__.__name__, self._testMethodName,
797 self._testMethodDoc))
798 self.vapi.cli("clear trace")
799 # store the test instance inside the test class - so that objects
800 # holding the class can access instance methods (like assertEqual)
801 type(self).test_instance = self
# NOTE(review): fragmentary excerpt of the packet-generator helpers -
# embedded line numbers show gaps in every method below.
804 def pg_enable_capture(cls, interfaces=None):
806 Enable capture on packet-generator interfaces
808 :param interfaces: iterable interface indexes (if None,
809 use self.pg_interfaces)
812 if interfaces is None:
813 interfaces = cls.pg_interfaces
818 def register_capture(cls, intf, worker):
819 """ Register a capture in the testclass """
820 # add to the list of captures with current timestamp
821 cls._captures.append((intf, worker))
# Parses "show clock" output; the float extraction/return is not visible.
824 def get_vpp_time(cls):
825 # processes e.g. "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
826 # returns float("2.190522")
827 timestr = cls.vapi.cli('show clock')
828 head, sep, tail = timestr.partition(',')
829 head, sep, tail = head.partition('Time now')
833 def sleep_on_vpp_time(cls, sec):
834 """ Sleep according to time in VPP world """
835 # On a busy system with many processes
836 # we might end up with VPP time being slower than real world
837 # So take that into account when waiting for VPP to do something
838 start_time = cls.get_vpp_time()
839 while cls.get_vpp_time() - start_time < sec:
843 def pg_start(cls, trace=True):
844 """ Enable the PG, wait till it is done, then clean up """
# Rotate capture files from the previous run before starting a new one.
845 for (intf, worker) in cls._old_captures:
846 intf.rename_previous_capture_file(intf.get_in_path(worker),
847 intf.in_history_counter)
848 cls._old_captures = []
850 cls.vapi.cli("clear trace")
851 cls.vapi.cli("trace add pg-input 1000")
852 cls.vapi.cli('packet-generator enable')
853 # PG, when starts, runs to completion -
854 # so let's avoid a race condition,
855 # and wait a little till it's done.
856 # Then clean it up - and then be gone.
# 300s safety deadline against a pg stream that never stops.
857 deadline = time.time() + 300
858 while cls.vapi.cli('show packet-generator').find("Yes") != -1:
859 cls.sleep(0.01) # yield
860 if time.time() > deadline:
861 cls.logger.error("Timeout waiting for pg to stop")
863 for intf, worker in cls._captures:
864 cls.vapi.cli('packet-generator delete %s' %
865 intf.get_cap_name(worker))
866 cls._old_captures = cls._captures
# NOTE(review): fragmentary excerpt of the interface factory methods and
# extend_packet - loop headers/returns are missing (gaps in line numbers).
870 def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
872 Create packet-generator interfaces.
874 :param interfaces: iterable indexes of the interfaces.
875 :returns: List of created interfaces.
# Each interface is also exposed as a class attribute (e.g. cls.pg0).
880 intf = VppPGInterface(cls, i, gso, gso_size)
881 setattr(cls, intf.name, intf)
883 cls.pg_interfaces = result
887 def create_loopback_interfaces(cls, count):
889 Create loopback interfaces.
891 :param count: number of interfaces created.
892 :returns: List of created interfaces.
894 result = [VppLoInterface(cls) for i in range(count)]
896 setattr(cls, intf.name, intf)
897 cls.lo_interfaces = result
901 def create_bvi_interfaces(cls, count):
903 Create BVI interfaces.
905 :param count: number of interfaces created.
906 :returns: List of created interfaces.
908 result = [VppBviInterface(cls) for i in range(count)]
910 setattr(cls, intf.name, intf)
911 cls.bvi_interfaces = result
915 def extend_packet(packet, size, padding=' '):
917 Extend packet to given size by padding with spaces or custom padding
918 NOTE: Currently works only when Raw layer is present.
920 :param packet: packet
921 :param size: target size
922 :param padding: padding used to extend the payload
# +4 accounts for bytes beyond len(packet) - presumably the Ethernet FCS;
# TODO confirm against the original file.
925 packet_len = len(packet) + 4
926 extend = size - packet_len
928 num = (extend // len(padding)) + 1
929 packet[Raw].load += (padding * num)[:extend].encode("ascii")
def reset_packet_infos(cls):
    """ Reset the list of packet info objects and packet counts to zero """
    # Fresh containers; per-destination counts are rebuilt as infos are added.
    cls._packet_infos = dict()
    cls._packet_count_for_dst_if_idx = dict()
# NOTE(review): fragmentary excerpt of the packet-info helpers - object
# creation lines, returns and loop headers are missing in several methods.
938 def create_packet_info(cls, src_if, dst_if):
940 Create packet info object containing the source and destination indexes
941 and add it to the testcase's packet info list
943 :param VppInterface src_if: source interface
944 :param VppInterface dst_if: destination interface
946 :returns: _PacketInfo object
950 info.index = len(cls._packet_infos)
951 info.src = src_if.sw_if_index
952 info.dst = dst_if.sw_if_index
# Sub-interfaces are counted against their parent interface index.
953 if isinstance(dst_if, VppSubInterface):
954 dst_idx = dst_if.parent.sw_if_index
956 dst_idx = dst_if.sw_if_index
957 if dst_idx in cls._packet_count_for_dst_if_idx:
958 cls._packet_count_for_dst_if_idx[dst_idx] += 1
960 cls._packet_count_for_dst_if_idx[dst_idx] = 1
961 cls._packet_infos[info.index] = info
# Serializes index/src/dst/ip/proto as a space-separated string.
965 def info_to_payload(info):
967 Convert _PacketInfo object to packet payload
969 :param info: _PacketInfo object
971 :returns: string containing serialized data from packet info
973 return "%d %d %d %d %d" % (info.index, info.src, info.dst,
977 def payload_to_info(payload, payload_field='load'):
979 Convert packet payload to _PacketInfo object
981 :param payload: packet payload
982 :type payload: <class 'scapy.packet.Raw'>
983 :param payload_field: packet fieldname of payload "load" for
984 <class 'scapy.packet.Raw'>
985 :type payload_field: str
986 :returns: _PacketInfo object containing de-serialized data from payload
989 numbers = getattr(payload, payload_field).split()
991 info.index = int(numbers[0])
992 info.src = int(numbers[1])
993 info.dst = int(numbers[2])
994 info.ip = int(numbers[3])
995 info.proto = int(numbers[4])
998 def get_next_packet_info(self, info):
1000 Iterate over the packet info list stored in the testcase
1001 Start iteration with first element if info is None
1002 Continue based on index in info if info is specified
1004 :param info: info or None
1005 :returns: next info in list or None if no more infos
1010 next_index = info.index + 1
# Indexes are dense (0..len-1), so reaching len means end of list.
1011 if next_index == len(self._packet_infos):
1014 return self._packet_infos[next_index]
1016 def get_next_packet_info_for_interface(self, src_index, info):
1018 Search the packet info list for the next packet info with same source
1021 :param src_index: source interface index to search for
1022 :param info: packet info - where to start the search
1023 :returns: packet info or None
1027 info = self.get_next_packet_info(info)
1030 if info.src == src_index:
1033 def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
1035 Search the packet info list for the next packet info with same source
1036 and destination interface indexes
1038 :param src_index: source interface index to search for
1039 :param dst_index: destination interface index to search for
1040 :param info: packet info - where to start the search
1041 :returns: packet info or None
1045 info = self.get_next_packet_info_for_interface(src_index, info)
1048 if info.dst == dst_index:
# NOTE(review): fragmentary excerpt of assert_equal / assert_in_range /
# assert_packet_checksums_valid / assert_checksum_valid - gaps throughout.
1051 def assert_equal(self, real_value, expected_value, name_or_class=None):
1052 if name_or_class is None:
1053 self.assertEqual(real_value, expected_value)
# Branch with an enum-like class: format both raw and symbolic values.
1056 msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
1057 msg = msg % (getdoc(name_or_class).strip(),
1058 real_value, str(name_or_class(real_value)),
1059 expected_value, str(name_or_class(expected_value)))
1061 msg = "Invalid %s: %s does not match expected value %s" % (
1062 name_or_class, real_value, expected_value)
1064 self.assertEqual(real_value, expected_value, msg)
# Inclusive range assertion; parameter lines 1067-1073 are not visible.
1066 def assert_in_range(self,
1074 msg = "Invalid %s: %s out of range <%s,%s>" % (
1075 name, real_value, expected_min, expected_max)
1076 self.assertTrue(expected_min <= real_value <= expected_max, msg)
# Walks every layer of a re-parsed packet, strips checksum fields, lets
# scapy recompute them, then compares against the received values.
1078 def assert_packet_checksums_valid(self, packet,
1079 ignore_zero_udp_checksums=True):
1080 received = packet.__class__(scapy.compat.raw(packet))
1081 udp_layers = ['UDP', 'UDPerror']
1082 checksum_fields = ['cksum', 'chksum']
1085 temp = received.__class__(scapy.compat.raw(received))
1087 layer = temp.getlayer(counter)
1089 layer = layer.copy()
1090 layer.remove_payload()
1091 for cf in checksum_fields:
1092 if hasattr(layer, cf):
# Zero UDP checksum means "not computed" - optionally skip it.
1093 if ignore_zero_udp_checksums and \
1094 0 == getattr(layer, cf) and \
1095 layer.name in udp_layers:
1097 delattr(temp.getlayer(counter), cf)
1098 checksums.append((counter, cf))
1101 counter = counter + 1
1102 if 0 == len(checksums):
# Re-parse so scapy fills in the deleted checksum fields.
1104 temp = temp.__class__(scapy.compat.raw(temp))
1105 for layer, cf in checksums:
1106 calc_sum = getattr(temp[layer], cf)
1108 getattr(received[layer], cf), calc_sum,
1109 "packet checksum on layer #%d: %s" % (layer, temp[layer].name))
1111 "Checksum field `%s` on `%s` layer has correct value `%s`" %
1112 (cf, temp[layer].name, calc_sum))
1114 def assert_checksum_valid(self, received_packet, layer,
1115 field_name='chksum',
1116 ignore_zero_checksum=False):
1117 """ Check checksum of received packet on given layer """
1118 received_packet_checksum = getattr(received_packet[layer], field_name)
1119 if ignore_zero_checksum and 0 == received_packet_checksum:
# Delete the field and re-parse so scapy recomputes the checksum.
1121 recalculated = received_packet.__class__(
1122 scapy.compat.raw(received_packet))
1123 delattr(recalculated[layer], field_name)
1124 recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
1125 self.assert_equal(received_packet_checksum,
1126 getattr(recalculated[layer], field_name),
1127 "packet checksum on layer: %s" % layer)
def assert_ip_checksum_valid(self, received_packet,
                             ignore_zero_checksum=False):
    """Verify the IPv4 header checksum of received_packet."""
    # Delegate to the generic per-layer checksum check.
    self.assert_checksum_valid(
        received_packet, 'IP', ignore_zero_checksum=ignore_zero_checksum)
def assert_tcp_checksum_valid(self, received_packet,
                              ignore_zero_checksum=False):
    """Verify the TCP checksum of received_packet."""
    # Delegate to the generic per-layer checksum check.
    self.assert_checksum_valid(
        received_packet, 'TCP', ignore_zero_checksum=ignore_zero_checksum)
def assert_udp_checksum_valid(self, received_packet,
                              ignore_zero_checksum=True):
    """Verify the UDP checksum; zero (uncomputed) checksums pass by default."""
    # Delegate to the generic per-layer checksum check.
    self.assert_checksum_valid(
        received_packet, 'UDP', ignore_zero_checksum=ignore_zero_checksum)
def assert_embedded_icmp_checksum_valid(self, received_packet):
    """Validate checksums of the original packet embedded in an ICMP error."""
    # (layer class, layer name, ignore-zero-checksum) — zero UDP checksums
    # mean "not computed" and are tolerated, matching UDP semantics.
    embedded_layers = (
        (IPerror, 'IPerror', False),
        (TCPerror, 'TCPerror', False),
        (UDPerror, 'UDPerror', True),
        (ICMPerror, 'ICMPerror', False),
    )
    for layer_cls, layer_name, ignore_zero in embedded_layers:
        if received_packet.haslayer(layer_cls):
            self.assert_checksum_valid(received_packet, layer_name,
                                       ignore_zero_checksum=ignore_zero)
    def assert_icmp_checksum_valid(self, received_packet):
        """ Check the ICMP layer checksum of the received packet, and the
        checksums of any quoted (embedded) error layers it contains. """
        self.assert_checksum_valid(received_packet, 'ICMP')
        self.assert_embedded_icmp_checksum_valid(received_packet)
1159 def assert_icmpv6_checksum_valid(self, pkt):
1160 if pkt.haslayer(ICMPv6DestUnreach):
1161 self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
1162 self.assert_embedded_icmp_checksum_valid(pkt)
1163 if pkt.haslayer(ICMPv6EchoRequest):
1164 self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
1165 if pkt.haslayer(ICMPv6EchoReply):
1166 self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
    def get_packet_counter(self, counter):
        """ Return the value of the given packet counter.

        :param counter: counter name; a name starting with "/" is read from
            the stats segment, otherwise it is searched for in the
            "show errors" CLI output
        """
        if counter.startswith("/"):
            counter_value = self.statistics.get_counter(counter)
            counters = self.vapi.cli("sh errors").split('\n')
            # range(1, len-1) skips the header line and the trailing
            # empty line of the CLI output
            for i in range(1, len(counters) - 1):
                results = counters[i].split()
                # NOTE(review): assumes the counter name is a single
                # whitespace-free token in column 2 -- confirm for counters
                # whose names contain spaces
                if results[1] == counter:
                    counter_value = int(results[0])
        return counter_value
1181 def assert_packet_counter_equal(self, counter, expected_value):
1182 counter_value = self.get_packet_counter(counter)
1183 self.assert_equal(counter_value, expected_value,
1184 "packet counter `%s'" % counter)
1186 def assert_error_counter_equal(self, counter, expected_value):
1187 counter_value = self.statistics[counter].sum()
1188 self.assert_equal(counter_value, expected_value,
1189 "error counter `%s'" % counter)
    def sleep(cls, timeout, remark=None):
        """ Sleep for *timeout* seconds, logging start/finish and flagging
        sleeps that took much longer than requested.

        :param timeout: requested sleep duration in seconds
        :param remark: optional text included in the log messages
        """
        # /* Allow sleep(0) to maintain win32 semantics, and as decreed
        # * by Guido, only the main thread can be interrupted.
        # https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892 # noqa
        if hasattr(os, 'sched_yield'):
        cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
        before = time.time()
        # oversleeping by more than a factor of two is typically a sign of
        # an overloaded test host - log it loudly
        if after - before > 2 * timeout:
            cls.logger.error("unexpected self.sleep() result - "
                             "slept for %es instead of ~%es!",
                             after - before, timeout)
            "Finished sleep (%s) - slept %es (wanted %es)",
            remark, after - before, timeout)
    def pg_send(self, intf, pkts, worker=None, trace=True):
        """ Enqueue *pkts* on *intf*, enable capture on all
        packet-generator interfaces and start the packet generator.

        :param intf: input interface the stream is added to
        :param pkts: packets to send
        :param worker: worker id to assign the stream to
        :param trace: enable packet tracing when starting
        """
        intf.add_stream(pkts, worker=worker)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start(trace=trace)
    def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
        """ Send packets and verify that no packet-generator interface
        captures anything in reply.

        :param intf: input interface
        :param pkts: packets to send
        :param remark: text appended to the failure message
        :param timeout: capture timeout passed to get_capture
        """
        self.pg_send(intf, pkts)
        for i in self.pg_interfaces:
            i.get_capture(0, timeout=timeout)
            i.assert_nothing_captured(remark=remark)
    # Send *pkts* on *intf*, then collect the capture from *output*.
    def send_and_expect(self, intf, pkts, output, n_rx=None, worker=None,
        self.pg_send(intf, pkts, worker=worker, trace=trace)
        rx = output.get_capture(n_rx)
    def send_and_expect_only(self, intf, pkts, output, timeout=None):
        """ Send packets and expect them to arrive only on *output*; every
        other packet-generator interface must capture nothing. """
        self.pg_send(intf, pkts)
        rx = output.get_capture(len(pkts))
        for i in self.pg_interfaces:
            # NOTE(review): 'outputs' is bound elsewhere (presumably
            # [output]) - confirm against the full source
            if i not in outputs:
                i.get_capture(0, timeout=timeout)
                i.assert_nothing_captured()
def get_testcase_doc_name(test):
    """ Return the first line of the docstring of *test*'s class.

    Falls back to the class name when no docstring is available, instead of
    raising AttributeError on ``getdoc()`` returning ``None``.

    :param test: test case instance
    :returns: human-readable test case name
    """
    doc = getdoc(test.__class__)
    if not doc:
        # undocumented test class - use its name so reporting code
        # (e.g. core-crash bookkeeping) never blows up
        return test.__class__.__name__
    return doc.splitlines()[0]
def get_test_description(descriptions, test):
    """ Return the short description of *test* when descriptions are
    enabled and the test provides one.

    :param descriptions: whether docstring descriptions should be used
    :param test: test case instance
    """
    short_description = test.shortDescription()
    if descriptions and short_description:
        return short_description
class TestCaseInfo(object):
    """ Per-test-case context bundle: the test logger, temp directory and
    information about the VPP process under test. """

    def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
        self.tempdir = tempdir
        self.vpp_bin_path = vpp_bin_path
        self.vpp_pid = vpp_pid
        self.logger = logger
        # name of the test during which a VPP core file appeared, if any
        self.core_crash_test = None
class VppTestResult(unittest.TestResult):
    """
    @property result_string
    String variable to store the test case result string.
    List variable containing 2-tuples of TestCase instances and strings
    holding formatted tracebacks. Each tuple represents a test which
    raised an unexpected exception.
    List variable containing 2-tuples of TestCase instances and strings
    holding formatted tracebacks. Each tuple represents a test where
    a failure was explicitly signalled using the TestCase.assert*()
    """

    # class-level state, shared across all result instances
    failed_test_cases_info = set()
    # infos of test cases during which a VPP core file was found
    core_crash_test_cases_info = set()
    # TestCaseInfo of the currently running test case, if any
    current_test_case_info = None
    def __init__(self, stream=None, descriptions=None, verbosity=None,
        :param stream File descriptor to store where to report test results.
        Set to the standard error stream by default.
        :param descriptions Boolean variable to store information if to use
        test case descriptions.
        :param verbosity Integer variable to store required verbosity level.
        super(VppTestResult, self).__init__(stream, descriptions, verbosity)
        self.stream = stream
        self.descriptions = descriptions
        self.verbosity = verbosity
        # colorized outcome of the most recent test, set by the add* hooks
        self.result_string = None
        # owning runner - used e.g. by printErrors() to silence the summary
        self.runner = runner
    def addSuccess(self, test):
        """
        Record a test succeeded result

        :param test: the passing test case
        """
        if self.current_test_case_info:
            self.current_test_case_info.logger.debug(
                "--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__,
                                                       test._testMethodName,
                                                       test._testMethodDoc))
        unittest.TestResult.addSuccess(self, test)
        self.result_string = colorize("OK", GREEN)
        self.send_result_through_pipe(test, PASS)
    def addSkip(self, test, reason):
        """
        Record a test skipped.

        :param test: the skipped test case
        :param reason: why the test was skipped
        """
        if self.current_test_case_info:
            self.current_test_case_info.logger.debug(
                "--- addSkip() %s.%s(%s) called, reason is %s" %
                (test.__class__.__name__, test._testMethodName,
                 test._testMethodDoc, reason))
        unittest.TestResult.addSkip(self, test, reason)
        self.result_string = colorize("SKIP", YELLOW)
        self.send_result_through_pipe(test, SKIP)
    def symlink_failed(self):
        """ Create a symlink in FAILED_DIR pointing at the failed test
        case's temp directory, so failure artifacts are easy to find. """
        if self.current_test_case_info:
                failed_dir = os.getenv('FAILED_DIR')
                link_path = os.path.join(
                    os.path.basename(self.current_test_case_info.tempdir))
                self.current_test_case_info.logger.debug(
                    "creating a link to the failed test")
                self.current_test_case_info.logger.debug(
                    "os.symlink(%s, %s)" %
                    (self.current_test_case_info.tempdir, link_path))
                # don't fail when the link already exists (e.g. re-run)
                if os.path.exists(link_path):
                    self.current_test_case_info.logger.debug(
                        'symlink already exists')
                    os.symlink(self.current_test_case_info.tempdir, link_path)
            except Exception as e:
                # best-effort convenience feature - never fail the run
                self.current_test_case_info.logger.error(e)
    def send_result_through_pipe(self, test, result):
        """ Report (test id, result) to the parent framework process over
        the result pipe, when one is attached. """
        if hasattr(self, 'test_framework_result_pipe'):
            pipe = self.test_framework_result_pipe
            pipe.send((test.id(), result))
    def log_error(self, test, err, fn_name):
        """ Log an error/failure with its formatted traceback to the
        current test case's logger.

        :param test: the test case (or _ErrorHolder for setup errors)
        :param err: (type, value, traceback) tuple as passed by unittest
        :param fn_name: name of the unittest hook reporting the error
        """
        if self.current_test_case_info:
            # _ErrorHolder (setUpClass & co. failures) has no test method
            if isinstance(test, unittest.suite._ErrorHolder):
                test_name = test.description
                test_name = '%s.%s(%s)' % (test.__class__.__name__,
                                           test._testMethodName,
                                           test._testMethodDoc)
            self.current_test_case_info.logger.debug(
                "--- %s() %s called, err is %s" %
                (fn_name, test_name, err))
            self.current_test_case_info.logger.debug(
                "formatted exception is:\n%s" %
                "".join(format_exception(*err)))
    def add_error(self, test, err, unittest_fn, error_type):
        """ Shared implementation behind addFailure()/addError().

        Logs the error, delegates to the original unittest handler, records
        the current test case info and checks whether a core file appeared
        in the test's temp directory.

        :param test: the failing test case
        :param err: (type, value, traceback) tuple
        :param unittest_fn: unittest.TestResult method to delegate to
        :param error_type: FAIL or ERROR
        """
        if error_type == FAIL:
            self.log_error(test, err, 'addFailure')
            error_type_str = colorize("FAIL", RED)
        elif error_type == ERROR:
            self.log_error(test, err, 'addError')
            error_type_str = colorize("ERROR", RED)
        # guard against unknown error_type values
        raise Exception('Error type %s cannot be used to record an '
                        'error or a failure' % error_type)
        unittest_fn(self, test, err)
        if self.current_test_case_info:
            self.result_string = "%s [ temp dir used by test case: %s ]" % \
                self.current_test_case_info.tempdir)
            self.symlink_failed()
            self.failed_test_cases_info.add(self.current_test_case_info)
            # a core file in the temp dir means VPP crashed during the test
            if is_core_present(self.current_test_case_info.tempdir):
                if not self.current_test_case_info.core_crash_test:
                    if isinstance(test, unittest.suite._ErrorHolder):
                        test_name = str(test)
                        test_name = "'{!s}' ({!s})".format(
                            get_testcase_doc_name(test), test.id())
                    self.current_test_case_info.core_crash_test = test_name
                self.core_crash_test_cases_info.add(
                    self.current_test_case_info)
        self.result_string = '%s [no temp dir]' % error_type_str
        self.send_result_through_pipe(test, error_type)
    def addFailure(self, test, err):
        """
        Record a test failed result

        :param test: the failing test case
        :param err: error message
        """
        self.add_error(test, err, unittest.TestResult.addFailure, FAIL)
    def addError(self, test, err):
        """
        Record a test error result

        :param test: the erroring test case
        :param err: error message
        """
        self.add_error(test, err, unittest.TestResult.addError, ERROR)
    def getDescription(self, test):
        """
        Get test description

        :param test: the test case
        :returns: test description
        """
        return get_test_description(self.descriptions, test)
    def startTest(self, test):
        """ Called by unittest before each test - records the start time
        and prints the once-per-class test header. """
        def print_header(test):
            # print the colorized test class title (first line of the
            # class docstring), once per test class
            test_doc = getdoc(test)
                raise Exception("No doc string for test '%s'" % test.id())
            test_title = test_doc.splitlines()[0]
            test_title_colored = colorize(test_title, GREEN)
            if test.is_tagged_run_solo():
                # long live PEP-8 and 80 char width limitation...
                test_title_colored = colorize("SOLO RUN: " + test_title, c)
            # This block may overwrite the colorized title above,
            # but we want this to stand out and be fixed
            if test.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
                w = "FIXME with VPP workers: "
                test_title_colored = colorize(w + test_title, c)
            if not hasattr(test.__class__, '_header_printed'):
                print(double_line_delim)
                print(test_title_colored)
                print(double_line_delim)
                test.__class__._header_printed = True
        self.start_test = time.time()
        unittest.TestResult.startTest(self, test)
        if self.verbosity > 0:
            self.stream.writeln(
                "Starting " + self.getDescription(test) + " ...")
            self.stream.writeln(single_line_delim)
    def stopTest(self, test):
        """
        Called when the given test has been run

        :param test: the finished test case
        """
        unittest.TestResult.stopTest(self, test)
        if self.verbosity > 0:
            self.stream.writeln(single_line_delim)
            self.stream.writeln("%-73s%s" % (self.getDescription(test),
                                             self.result_string))
            self.stream.writeln(single_line_delim)
            # terse one-line report including the test's wall-clock time
            self.stream.writeln("%-68s %4.2f %s" %
                                (self.getDescription(test),
                                 time.time() - self.start_test,
                                 self.result_string))
        self.send_result_through_pipe(test, TEST_RUN)
    def printErrors(self):
        """
        Print errors from running the test case
        """
        if len(self.errors) > 0 or len(self.failures) > 0:
            self.stream.writeln()
            self.printErrorList('ERROR', self.errors)
            self.printErrorList('FAIL', self.failures)
        # ^^ that is the last output from unittest before summary
        if not self.runner.print_summary:
            # silence the rest of unittest's output by pointing both our
            # stream and the runner's stream at /dev/null
            devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
            self.stream = devnull
            self.runner.stream = devnull
    def printErrorList(self, flavour, errors):
        """
        Print error list to the output stream together with error type
        and test case description.

        :param flavour: error type
        :param errors: iterable errors
        """
        for test, err in errors:
            self.stream.writeln(double_line_delim)
            self.stream.writeln("%s: %s" %
                                (flavour, self.getDescription(test)))
            self.stream.writeln(single_line_delim)
            self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
    """
    A basic test runner implementation which prints results to standard error.
    """
    def resultclass(self):
        """Class maintaining the results of the tests"""
        return VppTestResult
    def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
                 result_pipe=None, failfast=False, buffer=False,
                 resultclass=None, print_summary=True, **kwargs):
        # ignore stream setting here, use hard-coded stdout to be in sync
        # with prints from VppTestCase methods ...
        super(VppTestRunner, self).__init__(sys.stdout, descriptions,
                                            verbosity, failfast, buffer,
                                            resultclass, **kwargs)
        KeepAliveReporter.pipe = keep_alive_pipe
        # remember the original stream so run() can restore it after a
        # summary-suppressed run
        self.orig_stream = self.stream
        self.resultclass.test_framework_result_pipe = result_pipe
        self.print_summary = print_summary
    def _makeResult(self):
        """ Instantiate the result class for this run. """
        return self.resultclass(self.stream,
    def run(self, test):
        """
        Run the tests

        :param test: the test (suite) to run
        """
        faulthandler.enable()  # emit stack trace to stderr if killed by signal
        result = super(VppTestRunner, self).run(test)
        if not self.print_summary:
            # restore the streams that printErrors() redirected to /dev/null
            self.stream = self.orig_stream
            result.stream = self.orig_stream
class Worker(Thread):
    """ Thread running an external executable and logging its output. """

    def __init__(self, executable_args, logger, env=None, *args, **kwargs):
        """
        :param executable_args: argv of the program to run (argv[0] is the
            executable path)
        :param logger: logger used for the program's output
        :param env: extra environment variables for the child process
        """
        super(Worker, self).__init__(*args, **kwargs)
        self.logger = logger
        self.args = executable_args
        if hasattr(self, 'testcase') and self.testcase.debug_all:
            if self.testcase.debug_gdbserver:
                # NOTE(review): 'args' here is the leftover *args tuple,
                # not the executable arguments - list + tuple would raise
                # TypeError; presumably this should be self.args. Confirm.
                self.args = ['/usr/bin/gdbserver', 'localhost:{port}'
                             .format(port=self.testcase.gdbserver_port)] + args
            elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'):
                self.args.append(self.wait_for_gdb)
        self.app_bin = executable_args[0]
        self.app_name = os.path.basename(self.app_bin)
        if hasattr(self, 'role'):
            self.app_name += ' {role}'.format(role=self.role)
        env = {} if env is None else env
        # deep-copy so later mutations by the caller don't leak in
        self.env = copy.deepcopy(env)
    def wait_for_enter(self):
        """ In debug modes, print instructions for attaching gdb to the
        spawned process and block until the user presses ENTER. """
        if not hasattr(self, 'testcase'):
        if self.testcase.debug_all and self.testcase.debug_gdbserver:
            print(double_line_delim)
            print("Spawned GDB Server for '{app}' with PID: {pid}"
                  .format(app=self.app_name, pid=self.process.pid))
        elif self.testcase.debug_all and self.testcase.debug_gdb:
            print(double_line_delim)
            print("Spawned '{app}' with PID: {pid}"
                  .format(app=self.app_name, pid=self.process.pid))
        print(single_line_delim)
        print("You can debug '{app}' using:".format(app=self.app_name))
        if self.testcase.debug_gdbserver:
            print("sudo gdb " + self.app_bin +
                  " -ex 'target remote localhost:{port}'"
                  .format(port=self.testcase.gdbserver_port))
            print("Now is the time to attach gdb by running the above "
                  "command, set up breakpoints etc., then resume from "
                  "within gdb by issuing the 'continue' command")
            # bump the port so the next spawned process gets its own
            self.testcase.gdbserver_port += 1
        elif self.testcase.debug_gdb:
            print("sudo gdb " + self.app_bin +
                  " -ex 'attach {pid}'".format(pid=self.process.pid))
            print("Now is the time to attach gdb by running the above "
                  "command and set up breakpoints etc., then resume from"
                  " within gdb by issuing the 'continue' command")
        print(single_line_delim)
        input("Press ENTER to continue running the testcase...")
        # body of Worker.run(): spawn the executable, wait for it to finish
        # and log its stdout/stderr and return code
        executable = self.args[0]
        if not os.path.exists(executable) or not os.access(
                executable, os.F_OK | os.X_OK):
            # Exit code that means some system file did not exist,
            # could not be opened, or had some other kind of error.
            self.result = os.EX_OSFILE
            raise EnvironmentError(
                "executable '%s' is not found or executable." % executable)
        self.logger.debug("Running executable: '{app}'"
                          .format(app=' '.join(self.args)))
        env = os.environ.copy()
        env.update(self.env)
        env["CK_LOG_FILE_NAME"] = "-"
        # start the child in its own process group (os.setpgrp) and capture
        # both output streams
        self.process = subprocess.Popen(
            self.args, shell=False, env=env, preexec_fn=os.setpgrp,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.wait_for_enter()
        out, err = self.process.communicate()
        self.logger.debug("Finished running `{app}'".format(app=self.app_name))
        self.logger.info("Return code is `%s'" % self.process.returncode)
        self.logger.info(single_line_delim)
        self.logger.info("Executable `{app}' wrote to stdout:"
                         .format(app=self.app_name))
        self.logger.info(single_line_delim)
        self.logger.info(out.decode('utf-8'))
        self.logger.info(single_line_delim)
        self.logger.info("Executable `{app}' wrote to stderr:"
                         .format(app=self.app_name))
        self.logger.info(single_line_delim)
        self.logger.info(err.decode('utf-8'))
        self.logger.info(single_line_delim)
        self.result = self.process.returncode
1684 if __name__ == '__main__':