3 from __future__ import print_function
19 from collections import deque
20 from threading import Thread, Event
21 from inspect import getdoc, isclass
22 from traceback import format_exception
23 from logging import FileHandler, DEBUG, Formatter
26 from scapy.packet import Raw
27 import hook as hookmodule
28 from vpp_pg_interface import VppPGInterface
29 from vpp_sub_interface import VppSubInterface
30 from vpp_lo_interface import VppLoInterface
31 from vpp_bvi_interface import VppBviInterface
32 from vpp_papi_provider import VppPapiProvider
34 from vpp_papi.vpp_stats import VPPStats
35 from vpp_papi.vpp_transport_shmem import VppTransportShmemIOError
36 from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
38 from vpp_object import VppObjectRegistry
39 from util import ppp, is_core_present
40 from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
41 from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
42 from scapy.layers.inet6 import ICMPv6EchoReply
44 logger = logging.getLogger(__name__)
46 # Set up an empty logger for the testcase that can be overridden as necessary
47 null_logger = logging.getLogger('VppTestCase')
48 null_logger.addHandler(logging.NullHandler())
class BoolEnvironmentVariable(object):
    """Truthy wrapper around an environment variable.

    Each ``bool()`` conversion re-reads the environment, so changes made
    after construction are picked up.

    :param env_var_name: name of the environment variable to inspect
    :param default: value assumed when the variable is unset
    :param true_values: iterable of lower-case strings considered "true";
        defaults to ``("y", "yes", "1")``
    """

    def __init__(self, env_var_name, default='n', true_values=None):
        self.name = env_var_name
        self.default = default
        self.true_values = true_values if true_values is not None else \
            ("y", "yes", "1")

    def __bool__(self):
        # re-read the environment on every evaluation (lazy, not cached)
        return os.getenv(self.name, self.default).lower() in self.true_values

    if sys.version_info[0] == 2:
        __nonzero__ = __bool__

    def __repr__(self):
        return 'BoolEnvironmentVariable(%r, default=%r, true_values=%r)' % \
               (self.name, self.default, self.true_values)
76 debug_framework = BoolEnvironmentVariable('TEST_DEBUG')
81 Test framework module.
83 The module provides a set of tools for constructing and running tests and
84 representing the results.
class VppDiedError(Exception):
    """ exception for reporting that the subprocess has died."""

    # map signal numbers back to names, e.g. 15 -> 'SIGTERM'; used to
    # decode negative subprocess return codes
    signals_by_value = {v: k for k, v in signal.__dict__.items() if
                        k.startswith('SIG') and not k.startswith('SIG_')}

    def __init__(self, rv=None, testcase=None, method_name=None):
        """
        :param rv: subprocess return code (negative when killed by signal),
            or None when unknown
        :param testcase: name of the testcase that was running, if any
        :param method_name: name of the test method that was running, if any
        """
        self.rv = rv
        self.signal_name = None
        self.testcase = testcase
        self.method_name = method_name

        try:
            # negative return code means "killed by signal -rv"
            self.signal_name = VppDiedError.signals_by_value[-rv]
        except (KeyError, TypeError):
            # rv was None, positive, or not a known signal number
            pass

        if testcase is None and method_name is None:
            in_msg = ''
        else:
            in_msg = 'running %s.%s ' % (testcase, method_name)

        # NOTE: use %s (not %d) for the return code so the default rv=None
        # does not itself raise TypeError while reporting the death
        msg = "VPP subprocess died %sunexpectedly with return code: %s%s." % (
            in_msg,
            rv,
            ' [%s]' % (self.signal_name if
                       self.signal_name is not None else ''))
        super(VppDiedError, self).__init__(msg)
118 class _PacketInfo(object):
119 """Private class to create packet info object.
121 Help process information about the next packet.
122 Set variables to default values.
124 #: Store the index of the packet.
126 #: Store the index of the source packet generator interface of the packet.
128 #: Store the index of the destination packet generator interface
131 #: Store expected ip version
133 #: Store expected upper protocol
135 #: Store the copy of the former packet.
138 def __eq__(self, other):
139 index = self.index == other.index
140 src = self.src == other.src
141 dst = self.dst == other.dst
142 data = self.data == other.data
143 return index and src and dst and data
146 def pump_output(testclass):
147 """ pump output from vpp stdout/stderr to proper queues """
150 while not testclass.pump_thread_stop_flag.is_set():
151 readable = select.select([testclass.vpp.stdout.fileno(),
152 testclass.vpp.stderr.fileno(),
153 testclass.pump_thread_wakeup_pipe[0]],
155 if testclass.vpp.stdout.fileno() in readable:
156 read = os.read(testclass.vpp.stdout.fileno(), 102400)
158 split = read.decode('ascii',
159 errors='backslashreplace').splitlines(True)
160 if len(stdout_fragment) > 0:
161 split[0] = "%s%s" % (stdout_fragment, split[0])
162 if len(split) > 0 and split[-1].endswith("\n"):
166 stdout_fragment = split[-1]
167 testclass.vpp_stdout_deque.extend(split[:limit])
168 if not testclass.cache_vpp_output:
169 for line in split[:limit]:
170 testclass.logger.info(
171 "VPP STDOUT: %s" % line.rstrip("\n"))
172 if testclass.vpp.stderr.fileno() in readable:
173 read = os.read(testclass.vpp.stderr.fileno(), 102400)
175 split = read.decode('ascii',
176 errors='backslashreplace').splitlines(True)
177 if len(stderr_fragment) > 0:
178 split[0] = "%s%s" % (stderr_fragment, split[0])
179 if len(split) > 0 and split[-1].endswith("\n"):
183 stderr_fragment = split[-1]
185 testclass.vpp_stderr_deque.extend(split[:limit])
186 if not testclass.cache_vpp_output:
187 for line in split[:limit]:
188 testclass.logger.error(
189 "VPP STDERR: %s" % line.rstrip("\n"))
190 # ignoring the dummy pipe here intentionally - the
191 # flag will take care of properly terminating the loop
def _is_skip_aarch64_set():
    """Return a lazily-evaluated truth object for the SKIP_AARCH64 env var."""
    skip_flag = BoolEnvironmentVariable('SKIP_AARCH64')
    return skip_flag
198 is_skip_aarch64_set = _is_skip_aarch64_set()
201 def _is_platform_aarch64():
202 return platform.machine() == 'aarch64'
205 is_platform_aarch64 = _is_platform_aarch64()
def _running_extended_tests():
    """Return a lazily-evaluated truth object for the EXTENDED_TESTS env var."""
    extended_flag = BoolEnvironmentVariable("EXTENDED_TESTS")
    return extended_flag
212 running_extended_tests = _running_extended_tests()
def _running_gcov_tests():
    """Return a lazily-evaluated truth object for the GCOV_TESTS env var."""
    gcov_flag = BoolEnvironmentVariable("GCOV_TESTS")
    return gcov_flag
219 running_gcov_tests = _running_gcov_tests()
222 class KeepAliveReporter(object):
224 Singleton object which reports test start to parent process
229 self.__dict__ = self._shared_state
237 def pipe(self, pipe):
238 if self._pipe is not None:
239 raise Exception("Internal error - pipe should only be set once.")
242 def send_keep_alive(self, test, desc=None):
244 Write current test tmpdir & desc to keep-alive pipe to signal liveness
246 if self.pipe is None:
247 # if not running forked..
251 desc = '%s (%s)' % (desc, unittest.util.strclass(test))
255 self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
258 class VppTestCase(unittest.TestCase):
259 """This subclass is a base class for VPP test cases that are implemented as
260 classes. It provides methods to create and run test case.
263 extra_vpp_punt_config = []
264 extra_vpp_plugin_config = []
266 vapi_response_timeout = 5
    def packet_infos(self):
        """Packet info objects created so far, keyed by packet index."""
        return self._packet_infos
274 def get_packet_count_for_if_idx(cls, dst_if_index):
275 """Get the number of packet info for specified destination if index"""
276 if dst_if_index in cls._packet_count_for_dst_if_idx:
277 return cls._packet_count_for_dst_if_idx[dst_if_index]
283 """ if the test case class is timing-sensitive - return true """
288 """Return the instance of this testcase"""
289 return cls.test_instance
292 def set_debug_flags(cls, d):
293 cls.gdbserver_port = 7777
294 cls.debug_core = False
295 cls.debug_gdb = False
296 cls.debug_gdbserver = False
297 cls.debug_all = False
302 cls.debug_core = True
303 elif dl == "gdb" or dl == "gdb-all":
305 elif dl == "gdbserver" or dl == "gdbserver-all":
306 cls.debug_gdbserver = True
308 raise Exception("Unrecognized DEBUG option: '%s'" % d)
309 if dl == "gdb-all" or dl == "gdbserver-all":
313 def get_least_used_cpu():
314 cpu_usage_list = [set(range(psutil.cpu_count()))]
315 vpp_processes = [p for p in psutil.process_iter(attrs=['pid', 'name'])
316 if 'vpp_main' == p.info['name']]
317 for vpp_process in vpp_processes:
318 for cpu_usage_set in cpu_usage_list:
320 cpu_num = vpp_process.cpu_num()
321 if cpu_num in cpu_usage_set:
322 cpu_usage_set_index = cpu_usage_list.index(
324 if cpu_usage_set_index == len(cpu_usage_list) - 1:
325 cpu_usage_list.append({cpu_num})
327 cpu_usage_list[cpu_usage_set_index + 1].add(
329 cpu_usage_set.remove(cpu_num)
331 except psutil.NoSuchProcess:
334 for cpu_usage_set in cpu_usage_list:
335 if len(cpu_usage_set) > 0:
336 min_usage_set = cpu_usage_set
339 return random.choice(tuple(min_usage_set))
342 def setUpConstants(cls):
343 """ Set-up the test case class based on environment variables """
344 cls.step = BoolEnvironmentVariable('STEP')
345 d = os.getenv("DEBUG", None)
346 # inverted case to handle '' == True
347 c = os.getenv("CACHE_OUTPUT", "1")
348 cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
349 cls.set_debug_flags(d)
350 cls.vpp_bin = os.getenv('VPP_BIN', "vpp")
351 cls.plugin_path = os.getenv('VPP_PLUGIN_PATH')
352 cls.test_plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
353 cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
355 if cls.plugin_path is not None:
356 if cls.extern_plugin_path is not None:
357 plugin_path = "%s:%s" % (
358 cls.plugin_path, cls.extern_plugin_path)
360 plugin_path = cls.plugin_path
361 elif cls.extern_plugin_path is not None:
362 plugin_path = cls.extern_plugin_path
364 if cls.step or cls.debug_gdb or cls.debug_gdbserver:
365 debug_cli = "cli-listen localhost:5002"
367 size = os.getenv("COREDUMP_SIZE")
369 coredump_size = "coredump-size %s" % size
370 if coredump_size is None:
371 coredump_size = "coredump-size unlimited"
373 cpu_core_number = cls.get_least_used_cpu()
374 if not hasattr(cls, "worker_config"):
375 cls.worker_config = ""
377 default_variant = os.getenv("VARIANT")
378 if default_variant is not None:
379 default_variant = "defaults { %s 100 }" % default_variant
383 api_fuzzing = os.getenv("API_FUZZ")
384 if api_fuzzing is None:
387 cls.vpp_cmdline = [cls.vpp_bin, "unix",
388 "{", "nodaemon", debug_cli, "full-coredump",
389 coredump_size, "runtime-dir", cls.tempdir, "}",
390 "api-trace", "{", "on", "}", "api-segment", "{",
391 "prefix", cls.shm_prefix, "}", "cpu", "{",
392 "main-core", str(cpu_core_number),
393 cls.worker_config, "}",
394 "physmem", "{", "max-size", "32m", "}",
395 "statseg", "{", "socket-name", cls.stats_sock, "}",
396 "socksvr", "{", "socket-name", cls.api_sock, "}",
397 "node { ", default_variant, "}",
398 "api-fuzz {", api_fuzzing, "}",
400 "{", "plugin", "dpdk_plugin.so", "{", "disable",
401 "}", "plugin", "rdma_plugin.so", "{", "disable",
402 "}", "plugin", "lisp_unittest_plugin.so", "{",
404 "}", "plugin", "unittest_plugin.so", "{", "enable",
405 "}"] + cls.extra_vpp_plugin_config + ["}", ]
407 if cls.extra_vpp_punt_config is not None:
408 cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
409 if plugin_path is not None:
410 cls.vpp_cmdline.extend(["plugin_path", plugin_path])
411 if cls.test_plugin_path is not None:
412 cls.vpp_cmdline.extend(["test_plugin_path", cls.test_plugin_path])
414 cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
415 cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
418 def wait_for_enter(cls):
419 if cls.debug_gdbserver:
420 print(double_line_delim)
421 print("Spawned GDB server with PID: %d" % cls.vpp.pid)
423 print(double_line_delim)
424 print("Spawned VPP with PID: %d" % cls.vpp.pid)
426 cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
428 print(single_line_delim)
429 print("You can debug VPP using:")
430 if cls.debug_gdbserver:
431 print("sudo gdb " + cls.vpp_bin +
432 " -ex 'target remote localhost:{port}'"
433 .format(port=cls.gdbserver_port))
434 print("Now is the time to attach gdb by running the above "
435 "command, set up breakpoints etc., then resume VPP from "
436 "within gdb by issuing the 'continue' command")
437 cls.gdbserver_port += 1
439 print("sudo gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
440 print("Now is the time to attach gdb by running the above "
441 "command and set up breakpoints etc., then resume VPP from"
442 " within gdb by issuing the 'continue' command")
443 print(single_line_delim)
444 input("Press ENTER to continue running the testcase...")
448 cmdline = cls.vpp_cmdline
450 if cls.debug_gdbserver:
451 gdbserver = '/usr/bin/gdbserver'
452 if not os.path.isfile(gdbserver) or \
453 not os.access(gdbserver, os.X_OK):
454 raise Exception("gdbserver binary '%s' does not exist or is "
455 "not executable" % gdbserver)
457 cmdline = [gdbserver, 'localhost:{port}'
458 .format(port=cls.gdbserver_port)] + cls.vpp_cmdline
459 cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
462 cls.vpp = subprocess.Popen(cmdline,
463 stdout=subprocess.PIPE,
464 stderr=subprocess.PIPE)
465 except subprocess.CalledProcessError as e:
466 cls.logger.critical("Subprocess returned with non-0 return code: ("
470 cls.logger.critical("Subprocess returned with OS error: "
471 "(%s) %s", e.errno, e.strerror)
473 except Exception as e:
474 cls.logger.exception("Subprocess returned unexpected from "
481 def wait_for_coredump(cls):
482 corefile = cls.tempdir + "/core"
483 if os.path.isfile(corefile):
484 cls.logger.error("Waiting for coredump to complete: %s", corefile)
485 curr_size = os.path.getsize(corefile)
486 deadline = time.time() + 60
488 while time.time() < deadline:
491 curr_size = os.path.getsize(corefile)
492 if size == curr_size:
496 cls.logger.error("Timed out waiting for coredump to complete:"
499 cls.logger.error("Coredump complete: %s, size %d",
505 Perform class setup before running the testcase
506 Remove shared memory files, start vpp and connect the vpp-api
508 super(VppTestCase, cls).setUpClass()
509 gc.collect() # run garbage collection first
510 cls.logger = get_logger(cls.__name__)
511 seed = os.environ["RND_SEED"]
513 if hasattr(cls, 'parallel_handler'):
514 cls.logger.addHandler(cls.parallel_handler)
515 cls.logger.propagate = False
517 cls.tempdir = tempfile.mkdtemp(
518 prefix='vpp-unittest-%s-' % cls.__name__)
519 cls.stats_sock = "%s/stats.sock" % cls.tempdir
520 cls.api_sock = "%s/api.sock" % cls.tempdir
521 cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
522 cls.file_handler.setFormatter(
523 Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
525 cls.file_handler.setLevel(DEBUG)
526 cls.logger.addHandler(cls.file_handler)
527 cls.logger.debug("--- setUpClass() for %s called ---" %
529 cls.shm_prefix = os.path.basename(cls.tempdir)
530 os.chdir(cls.tempdir)
531 cls.logger.info("Temporary dir is %s, shm prefix is %s",
532 cls.tempdir, cls.shm_prefix)
533 cls.logger.debug("Random seed is %s" % seed)
535 cls.reset_packet_infos()
539 cls.registry = VppObjectRegistry()
540 cls.vpp_startup_failed = False
541 cls.reporter = KeepAliveReporter()
542 # need to catch exceptions here because if we raise, then the cleanup
543 # doesn't get called and we might end with a zombie vpp
546 cls.reporter.send_keep_alive(cls, 'setUpClass')
547 VppTestResult.current_test_case_info = TestCaseInfo(
548 cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
549 cls.vpp_stdout_deque = deque()
550 cls.vpp_stderr_deque = deque()
551 cls.pump_thread_stop_flag = Event()
552 cls.pump_thread_wakeup_pipe = os.pipe()
553 cls.pump_thread = Thread(target=pump_output, args=(cls,))
554 cls.pump_thread.daemon = True
555 cls.pump_thread.start()
556 if cls.debug_gdb or cls.debug_gdbserver:
557 cls.vapi_response_timeout = 0
558 cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls,
559 cls.vapi_response_timeout)
561 hook = hookmodule.StepHook(cls)
563 hook = hookmodule.PollHook(cls)
564 cls.vapi.register_hook(hook)
565 cls.statistics = VPPStats(socketname=cls.stats_sock)
569 cls.vpp_startup_failed = True
571 "VPP died shortly after startup, check the"
572 " output to standard error for possible cause")
576 except vpp_papi.VPPIOError as e:
577 cls.logger.debug("Exception connecting to vapi: %s" % e)
578 cls.vapi.disconnect()
580 if cls.debug_gdbserver:
581 print(colorize("You're running VPP inside gdbserver but "
582 "VPP-API connection failed, did you forget "
583 "to 'continue' VPP from within gdb?", RED))
585 except vpp_papi.VPPRuntimeError as e:
586 cls.logger.debug("%s" % e)
589 except Exception as e:
590 cls.logger.debug("Exception connecting to VPP: %s" % e)
595 def _debug_quit(cls):
596 if (cls.debug_gdbserver or cls.debug_gdb):
600 if cls.vpp.returncode is None:
602 print(double_line_delim)
603 print("VPP or GDB server is still running")
604 print(single_line_delim)
605 input("When done debugging, press ENTER to kill the "
606 "process and finish running the testcase...")
607 except AttributeError:
613 Disconnect vpp-api, kill vpp and cleanup shared memory files
617 # first signal that we want to stop the pump thread, then wake it up
618 if hasattr(cls, 'pump_thread_stop_flag'):
619 cls.pump_thread_stop_flag.set()
620 if hasattr(cls, 'pump_thread_wakeup_pipe'):
621 os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up')
622 if hasattr(cls, 'pump_thread'):
623 cls.logger.debug("Waiting for pump thread to stop")
624 cls.pump_thread.join()
625 if hasattr(cls, 'vpp_stderr_reader_thread'):
626 cls.logger.debug("Waiting for stderr pump to stop")
627 cls.vpp_stderr_reader_thread.join()
629 if hasattr(cls, 'vpp'):
630 if hasattr(cls, 'vapi'):
631 cls.logger.debug(cls.vapi.vpp.get_stats())
632 cls.logger.debug("Disconnecting class vapi client on %s",
634 cls.vapi.disconnect()
635 cls.logger.debug("Deleting class vapi attribute on %s",
639 if cls.vpp.returncode is None:
640 cls.wait_for_coredump()
641 cls.logger.debug("Sending TERM to vpp")
643 cls.logger.debug("Waiting for vpp to die")
644 cls.vpp.communicate()
645 cls.logger.debug("Deleting class vpp attribute on %s",
649 if cls.vpp_startup_failed:
650 stdout_log = cls.logger.info
651 stderr_log = cls.logger.critical
653 stdout_log = cls.logger.info
654 stderr_log = cls.logger.info
656 if hasattr(cls, 'vpp_stdout_deque'):
657 stdout_log(single_line_delim)
658 stdout_log('VPP output to stdout while running %s:', cls.__name__)
659 stdout_log(single_line_delim)
660 vpp_output = "".join(cls.vpp_stdout_deque)
661 with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
663 stdout_log('\n%s', vpp_output)
664 stdout_log(single_line_delim)
666 if hasattr(cls, 'vpp_stderr_deque'):
667 stderr_log(single_line_delim)
668 stderr_log('VPP output to stderr while running %s:', cls.__name__)
669 stderr_log(single_line_delim)
670 vpp_output = "".join(cls.vpp_stderr_deque)
671 with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
673 stderr_log('\n%s', vpp_output)
674 stderr_log(single_line_delim)
677 def tearDownClass(cls):
678 """ Perform final cleanup after running all tests in this test-case """
679 cls.logger.debug("--- tearDownClass() for %s called ---" %
681 cls.reporter.send_keep_alive(cls, 'tearDownClass')
683 cls.file_handler.close()
684 cls.reset_packet_infos()
686 debug_internal.on_tear_down_class(cls)
    def show_commands_at_teardown(self):
        """ Allow subclass specific teardown logging additions.

        Default implementation only logs that no extra show commands were
        provided; subclasses override this to dump their own state.
        """
        self.logger.info("--- No test specific show commands provided. ---")
693 """ Show various debug prints after each test """
694 self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
695 (self.__class__.__name__, self._testMethodName,
696 self._testMethodDoc))
699 if not self.vpp_dead:
700 self.logger.debug(self.vapi.cli("show trace max 1000"))
701 self.logger.info(self.vapi.ppcli("show interface"))
702 self.logger.info(self.vapi.ppcli("show hardware"))
703 self.logger.info(self.statistics.set_errors_str())
704 self.logger.info(self.vapi.ppcli("show run"))
705 self.logger.info(self.vapi.ppcli("show log"))
706 self.logger.info(self.vapi.ppcli("show bihash"))
707 self.logger.info("Logging testcase specific show commands.")
708 self.show_commands_at_teardown()
709 self.registry.remove_vpp_config(self.logger)
710 # Save/Dump VPP api trace log
711 m = self._testMethodName
712 api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
713 tmp_api_trace = "/tmp/%s" % api_trace
714 vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
715 self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
716 self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
718 os.rename(tmp_api_trace, vpp_api_trace_log)
719 self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
721 except VppTransportShmemIOError:
722 self.logger.debug("VppTransportShmemIOError: Vpp dead. "
723 "Cannot log show commands.")
726 self.registry.unregister_all(self.logger)
729 """ Clear trace before running each test"""
730 super(VppTestCase, self).setUp()
731 self.reporter.send_keep_alive(self)
734 raise VppDiedError(rv=None, testcase=self.__class__.__name__,
735 method_name=self._testMethodName)
736 self.sleep(.1, "during setUp")
737 self.vpp_stdout_deque.append(
738 "--- test setUp() for %s.%s(%s) starts here ---\n" %
739 (self.__class__.__name__, self._testMethodName,
740 self._testMethodDoc))
741 self.vpp_stderr_deque.append(
742 "--- test setUp() for %s.%s(%s) starts here ---\n" %
743 (self.__class__.__name__, self._testMethodName,
744 self._testMethodDoc))
745 self.vapi.cli("clear trace")
746 # store the test instance inside the test class - so that objects
747 # holding the class can access instance methods (like assertEqual)
748 type(self).test_instance = self
751 def pg_enable_capture(cls, interfaces=None):
753 Enable capture on packet-generator interfaces
755 :param interfaces: iterable interface indexes (if None,
756 use self.pg_interfaces)
759 if interfaces is None:
760 interfaces = cls.pg_interfaces
765 def register_capture(cls, cap_name):
766 """ Register a capture in the testclass """
767 # add to the list of captures with current timestamp
768 cls._captures.append((time.time(), cap_name))
771 def get_vpp_time(cls):
772 # processes e.g. "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
773 # returns float("2.190522")
774 timestr = cls.vapi.cli('show clock')
775 head, sep, tail = timestr.partition(',')
776 head, sep, tail = head.partition('Time now')
780 def sleep_on_vpp_time(cls, sec):
781 """ Sleep according to time in VPP world """
782 # On a busy system with many processes
783 # we might end up with VPP time being slower than real world
784 # So take that into account when waiting for VPP to do something
785 start_time = cls.get_vpp_time()
786 while cls.get_vpp_time() - start_time < sec:
791 """ Enable the PG, wait till it is done, then clean up """
792 cls.vapi.cli("trace add pg-input 1000")
793 cls.vapi.cli('packet-generator enable')
794 # PG, when starts, runs to completion -
795 # so let's avoid a race condition,
796 # and wait a little till it's done.
797 # Then clean it up - and then be gone.
798 deadline = time.time() + 300
799 while cls.vapi.cli('show packet-generator').find("Yes") != -1:
800 cls.sleep(0.01) # yield
801 if time.time() > deadline:
802 cls.logger.error("Timeout waiting for pg to stop")
804 for stamp, cap_name in cls._captures:
805 cls.vapi.cli('packet-generator delete %s' % cap_name)
809 def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
811 Create packet-generator interfaces.
813 :param interfaces: iterable indexes of the interfaces.
814 :returns: List of created interfaces.
819 intf = VppPGInterface(cls, i, gso, gso_size)
820 setattr(cls, intf.name, intf)
822 cls.pg_interfaces = result
826 def create_loopback_interfaces(cls, count):
828 Create loopback interfaces.
830 :param count: number of interfaces created.
831 :returns: List of created interfaces.
833 result = [VppLoInterface(cls) for i in range(count)]
835 setattr(cls, intf.name, intf)
836 cls.lo_interfaces = result
840 def create_bvi_interfaces(cls, count):
842 Create BVI interfaces.
844 :param count: number of interfaces created.
845 :returns: List of created interfaces.
847 result = [VppBviInterface(cls) for i in range(count)]
849 setattr(cls, intf.name, intf)
850 cls.bvi_interfaces = result
854 def extend_packet(packet, size, padding=' '):
856 Extend packet to given size by padding with spaces or custom padding
857 NOTE: Currently works only when Raw layer is present.
859 :param packet: packet
860 :param size: target size
861 :param padding: padding used to extend the payload
864 packet_len = len(packet) + 4
865 extend = size - packet_len
867 num = (extend // len(padding)) + 1
868 packet[Raw].load += (padding * num)[:extend].encode("ascii")
871 def reset_packet_infos(cls):
872 """ Reset the list of packet info objects and packet counts to zero """
873 cls._packet_infos = {}
874 cls._packet_count_for_dst_if_idx = {}
877 def create_packet_info(cls, src_if, dst_if):
879 Create packet info object containing the source and destination indexes
880 and add it to the testcase's packet info list
882 :param VppInterface src_if: source interface
883 :param VppInterface dst_if: destination interface
885 :returns: _PacketInfo object
889 info.index = len(cls._packet_infos)
890 info.src = src_if.sw_if_index
891 info.dst = dst_if.sw_if_index
892 if isinstance(dst_if, VppSubInterface):
893 dst_idx = dst_if.parent.sw_if_index
895 dst_idx = dst_if.sw_if_index
896 if dst_idx in cls._packet_count_for_dst_if_idx:
897 cls._packet_count_for_dst_if_idx[dst_idx] += 1
899 cls._packet_count_for_dst_if_idx[dst_idx] = 1
900 cls._packet_infos[info.index] = info
904 def info_to_payload(info):
906 Convert _PacketInfo object to packet payload
908 :param info: _PacketInfo object
910 :returns: string containing serialized data from packet info
912 return "%d %d %d %d %d" % (info.index, info.src, info.dst,
916 def payload_to_info(payload, payload_field='load'):
918 Convert packet payload to _PacketInfo object
920 :param payload: packet payload
921 :type payload: <class 'scapy.packet.Raw'>
922 :param payload_field: packet fieldname of payload "load" for
923 <class 'scapy.packet.Raw'>
924 :type payload_field: str
925 :returns: _PacketInfo object containing de-serialized data from payload
928 numbers = getattr(payload, payload_field).split()
930 info.index = int(numbers[0])
931 info.src = int(numbers[1])
932 info.dst = int(numbers[2])
933 info.ip = int(numbers[3])
934 info.proto = int(numbers[4])
937 def get_next_packet_info(self, info):
939 Iterate over the packet info list stored in the testcase
940 Start iteration with first element if info is None
941 Continue based on index in info if info is specified
943 :param info: info or None
944 :returns: next info in list or None if no more infos
949 next_index = info.index + 1
950 if next_index == len(self._packet_infos):
953 return self._packet_infos[next_index]
955 def get_next_packet_info_for_interface(self, src_index, info):
957 Search the packet info list for the next packet info with same source
960 :param src_index: source interface index to search for
961 :param info: packet info - where to start the search
962 :returns: packet info or None
966 info = self.get_next_packet_info(info)
969 if info.src == src_index:
972 def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
974 Search the packet info list for the next packet info with same source
975 and destination interface indexes
977 :param src_index: source interface index to search for
978 :param dst_index: destination interface index to search for
979 :param info: packet info - where to start the search
980 :returns: packet info or None
984 info = self.get_next_packet_info_for_interface(src_index, info)
987 if info.dst == dst_index:
990 def assert_equal(self, real_value, expected_value, name_or_class=None):
991 if name_or_class is None:
992 self.assertEqual(real_value, expected_value)
995 msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
996 msg = msg % (getdoc(name_or_class).strip(),
997 real_value, str(name_or_class(real_value)),
998 expected_value, str(name_or_class(expected_value)))
1000 msg = "Invalid %s: %s does not match expected value %s" % (
1001 name_or_class, real_value, expected_value)
1003 self.assertEqual(real_value, expected_value, msg)
1005 def assert_in_range(self,
1013 msg = "Invalid %s: %s out of range <%s,%s>" % (
1014 name, real_value, expected_min, expected_max)
1015 self.assertTrue(expected_min <= real_value <= expected_max, msg)
1017 def assert_packet_checksums_valid(self, packet,
1018 ignore_zero_udp_checksums=True):
1019 received = packet.__class__(scapy.compat.raw(packet))
1020 udp_layers = ['UDP', 'UDPerror']
1021 checksum_fields = ['cksum', 'chksum']
1024 temp = received.__class__(scapy.compat.raw(received))
1026 layer = temp.getlayer(counter)
1028 layer = layer.copy()
1029 layer.remove_payload()
1030 for cf in checksum_fields:
1031 if hasattr(layer, cf):
1032 if ignore_zero_udp_checksums and \
1033 0 == getattr(layer, cf) and \
1034 layer.name in udp_layers:
1036 delattr(temp.getlayer(counter), cf)
1037 checksums.append((counter, cf))
1040 counter = counter + 1
1041 if 0 == len(checksums):
1043 temp = temp.__class__(scapy.compat.raw(temp))
1044 for layer, cf in checksums:
1045 calc_sum = getattr(temp[layer], cf)
1047 getattr(received[layer], cf), calc_sum,
1048 "packet checksum on layer #%d: %s" % (layer, temp[layer].name))
1050 "Checksum field `%s` on `%s` layer has correct value `%s`" %
1051 (cf, temp[layer].name, calc_sum))
1053 def assert_checksum_valid(self, received_packet, layer,
1054 field_name='chksum',
1055 ignore_zero_checksum=False):
1056 """ Check checksum of received packet on given layer """
1057 received_packet_checksum = getattr(received_packet[layer], field_name)
1058 if ignore_zero_checksum and 0 == received_packet_checksum:
1060 recalculated = received_packet.__class__(
1061 scapy.compat.raw(received_packet))
1062 delattr(recalculated[layer], field_name)
1063 recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
1064 self.assert_equal(received_packet_checksum,
1065 getattr(recalculated[layer], field_name),
1066 "packet checksum on layer: %s" % layer)
    def assert_ip_checksum_valid(self, received_packet,
                                 ignore_zero_checksum=False):
        """Verify the checksum of the IP layer of *received_packet*."""
        self.assert_checksum_valid(received_packet, 'IP',
                                   ignore_zero_checksum=ignore_zero_checksum)
    def assert_tcp_checksum_valid(self, received_packet,
                                  ignore_zero_checksum=False):
        """Verify the checksum of the TCP layer of *received_packet*."""
        self.assert_checksum_valid(received_packet, 'TCP',
                                   ignore_zero_checksum=ignore_zero_checksum)
    def assert_udp_checksum_valid(self, received_packet,
                                  ignore_zero_checksum=True):
        """Verify the checksum of the UDP layer of *received_packet*.

        A zero checksum is tolerated by default (note the default differs
        from the TCP/IP variants of this helper).
        """
        self.assert_checksum_valid(received_packet, 'UDP',
                                   ignore_zero_checksum=ignore_zero_checksum)
1083 def assert_embedded_icmp_checksum_valid(self, received_packet):
1084 if received_packet.haslayer(IPerror):
1085 self.assert_checksum_valid(received_packet, 'IPerror')
1086 if received_packet.haslayer(TCPerror):
1087 self.assert_checksum_valid(received_packet, 'TCPerror')
1088 if received_packet.haslayer(UDPerror):
1089 self.assert_checksum_valid(received_packet, 'UDPerror',
1090 ignore_zero_checksum=True)
1091 if received_packet.haslayer(ICMPerror):
1092 self.assert_checksum_valid(received_packet, 'ICMPerror')
    def assert_icmp_checksum_valid(self, received_packet):
        """Verify the ICMP layer checksum and the checksums of any embedded
        *error layers of *received_packet*."""
        self.assert_checksum_valid(received_packet, 'ICMP')
        self.assert_embedded_icmp_checksum_valid(received_packet)
1098 def assert_icmpv6_checksum_valid(self, pkt):
1099 if pkt.haslayer(ICMPv6DestUnreach):
1100 self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
1101 self.assert_embedded_icmp_checksum_valid(pkt)
1102 if pkt.haslayer(ICMPv6EchoRequest):
1103 self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
1104 if pkt.haslayer(ICMPv6EchoReply):
1105 self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
1107 def get_packet_counter(self, counter):
1108 if counter.startswith("/"):
1109 counter_value = self.statistics.get_counter(counter)
1111 counters = self.vapi.cli("sh errors").split('\n')
1113 for i in range(1, len(counters) - 1):
1114 results = counters[i].split()
1115 if results[1] == counter:
1116 counter_value = int(results[0])
1118 return counter_value
1120 def assert_packet_counter_equal(self, counter, expected_value):
1121 counter_value = self.get_packet_counter(counter)
1122 self.assert_equal(counter_value, expected_value,
1123 "packet counter `%s'" % counter)
1125 def assert_error_counter_equal(self, counter, expected_value):
1126 counter_value = self.statistics.get_err_counter(counter)
1127 self.assert_equal(counter_value, expected_value,
1128 "error counter `%s'" % counter)
def sleep(cls, timeout, remark=None):
    """Sleep for `timeout` seconds, logging and sanity-checking the
    actually elapsed time.  (classmethod - decorator lives on the
    preceding line.)

    :param timeout: time to sleep, in seconds; 0 only yields the CPU
    :param remark: optional text included in the log messages
    """

    # /* Allow sleep(0) to maintain win32 semantics, and as decreed
    # * by Guido, only the main thread can be interrupted.
    # */
    # https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892  # noqa
    if timeout == 0:
        # yield quantum
        if hasattr(os, 'sched_yield'):
            os.sched_yield()
        else:
            time.sleep(0)
        return

    cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
    before = time.time()
    time.sleep(timeout)
    after = time.time()
    if after - before > 2 * timeout:
        # warn if the wall clock overshoot is suspiciously large
        cls.logger.error("unexpected self.sleep() result - "
                         "slept for %es instead of ~%es!",
                         after - before, timeout)

    cls.logger.debug(
        "Finished sleep (%s) - slept %es (wanted %es)",
        remark, after - before, timeout)
def pg_send(self, intf, pkts, worker=None):
    """Enqueue a packet list on an interface and start the packet
    generator (the visible copy was truncated before pg_start()).

    :param intf: interface to transmit on
    :param pkts: list of packets to enqueue
    :param worker: optional worker thread index for the stream
    """
    self.vapi.cli("clear trace")
    intf.add_stream(pkts, worker=worker)
    self.pg_enable_capture(self.pg_interfaces)
    self.pg_start()
def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
    """Send packets and verify that no interface captures any reply.

    :param intf: interface to send on
    :param pkts: packets to send
    :param remark: remark used in the failure message
    :param timeout: capture timeout for the first interface checked;
        defaults to 1s, subsequent interfaces only wait 0.1s
    """
    self.pg_send(intf, pkts)
    if not timeout:
        timeout = 1
    for i in self.pg_interfaces:
        i.get_capture(0, timeout=timeout)
        i.assert_nothing_captured(remark=remark)
        # after the first wait, later interfaces need only a short one
        timeout = 0.1
def send_and_expect(self, intf, pkts, output, n_rx=None, worker=None):
    """Send packets and expect them to arrive on `output`.

    :param intf: interface to send on
    :param pkts: packets to send
    :param output: interface the packets are expected on
    :param n_rx: number of expected packets; defaults to len(pkts)
    :param worker: optional worker thread index for the stream
    :returns: the captured packets
    """
    if not n_rx:
        n_rx = len(pkts)
    self.pg_send(intf, pkts, worker=worker)
    rx = output.get_capture(n_rx)
    return rx
def send_and_expect_only(self, intf, pkts, output, timeout=None):
    """Send packets, expect replies ONLY on `output`; every other
    interface must capture nothing.

    :param intf: interface to send on
    :param pkts: packets to send
    :param output: the only interface replies may arrive on
    :param timeout: capture timeout for the first non-output interface;
        defaults to 1s, subsequent interfaces only wait 0.1s
    :returns: the packets captured on `output`
    """
    self.pg_send(intf, pkts)
    rx = output.get_capture(len(pkts))
    outputs = [output]
    if not timeout:
        timeout = 1
    for i in self.pg_interfaces:
        if i not in outputs:
            i.get_capture(0, timeout=timeout)
            i.assert_nothing_captured()
            timeout = 0.1
    return rx
def get_testcase_doc_name(test):
    """Return the first line of the docstring of the test's class."""
    doc = getdoc(test.__class__)
    return doc.splitlines()[0]
def get_test_description(descriptions, test):
    """Return the description used for a test in reports.

    Prefer the test's short description (first docstring line) when
    descriptions are enabled and one exists; otherwise fall back to
    str(test).  (The visible copy was truncated and returned None in
    the fallback case.)

    :param descriptions: whether descriptions are enabled
    :param test: the test to describe
    :returns: description string
    """
    short_description = test.shortDescription()
    if descriptions and short_description:
        return short_description
    else:
        return str(test)
class TestCaseInfo(object):
    """Per-test-case bookkeeping used by VppTestResult.

    Bundles the logger, temporary directory and VPP process data
    belonging to one test case run.
    """

    def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
        # logger dedicated to this test case
        self.logger = logger
        # temporary directory the test case runs in
        self.tempdir = tempdir
        # PID of the VPP process under test
        self.vpp_pid = vpp_pid
        # path to the VPP binary (used when inspecting core files)
        self.vpp_bin_path = vpp_bin_path
        # name of the test during which VPP dumped core, if any
        self.core_crash_test = None
class VppTestResult(unittest.TestResult):
    """
    @property result_string
     String variable to store the test case result string.
    @property errors
     List variable containing 2-tuples of TestCase instances and strings
     holding formatted tracebacks. Each tuple represents a test which
     raised an unexpected exception.
    @property failures
     List variable containing 2-tuples of TestCase instances and strings
     holding formatted tracebacks. Each tuple represents a test where
     a failure was explicitly signalled using the TestCase.assert*()
     methods.
    """

    # test cases that failed, with their TestCaseInfo
    failed_test_cases_info = set()
    # test cases during which VPP dumped core
    core_crash_test_cases_info = set()
    # TestCaseInfo of the test case currently executing
    current_test_case_info = None

    def __init__(self, stream=None, descriptions=None, verbosity=None,
                 runner=None):
        """
        :param stream File descriptor to store where to report test results.
            Set to the standard error stream by default.
        :param descriptions Boolean variable to store information if to use
            test case descriptions.
        :param verbosity Integer variable to store required verbosity level.
        """
        super(VppTestResult, self).__init__(stream, descriptions, verbosity)
        self.stream = stream
        self.descriptions = descriptions
        self.verbosity = verbosity
        self.result_string = None
        # runner reference, used to silence summary output when asked
        self.runner = runner
def addSuccess(self, test):
    """Record a test succeeded result.

    :param test: the test that passed
    """
    info = self.current_test_case_info
    if info:
        info.logger.debug("--- addSuccess() %s.%s(%s) called"
                          % (test.__class__.__name__,
                             test._testMethodName,
                             test._testMethodDoc))
    unittest.TestResult.addSuccess(self, test)
    self.result_string = colorize("OK", GREEN)

    self.send_result_through_pipe(test, PASS)
def addSkip(self, test, reason):
    """Record a skipped test and the reason it was skipped.

    :param test: the skipped test
    :param reason: why it was skipped
    """
    info = self.current_test_case_info
    if info:
        info.logger.debug("--- addSkip() %s.%s(%s) called, reason is %s"
                          % (test.__class__.__name__,
                             test._testMethodName,
                             test._testMethodDoc,
                             reason))
    unittest.TestResult.addSkip(self, test, reason)
    self.result_string = colorize("SKIP", YELLOW)

    self.send_result_through_pipe(test, SKIP)
def symlink_failed(self):
    """Create a '<tempdir-name>-FAILED' symlink in $FAILED_DIR pointing
    at the failed test case's temporary directory.

    Best-effort: any error is logged and swallowed so that reporting a
    failure can never break the test run itself.  (The visible copy was
    truncated - the try:, the link-path expression and the else: branch
    are restored here.)
    """
    if self.current_test_case_info:
        try:
            failed_dir = os.getenv('FAILED_DIR')
            link_path = os.path.join(
                failed_dir,
                '%s-FAILED' %
                os.path.basename(self.current_test_case_info.tempdir))

            self.current_test_case_info.logger.debug(
                "creating a link to the failed test")
            self.current_test_case_info.logger.debug(
                "os.symlink(%s, %s)" %
                (self.current_test_case_info.tempdir, link_path))
            if os.path.exists(link_path):
                self.current_test_case_info.logger.debug(
                    'symlink already exists')
            else:
                os.symlink(self.current_test_case_info.tempdir, link_path)

        except Exception as e:
            self.current_test_case_info.logger.error(e)
def send_result_through_pipe(self, test, result):
    """Report a (test id, result) tuple to the parent process, when a
    result pipe was configured on the class.

    The `if pipe:` guard (missing in the visible copy) is required -
    the class attribute may be explicitly set to None.
    """
    if hasattr(self, 'test_framework_result_pipe'):
        pipe = self.test_framework_result_pipe
        if pipe:
            pipe.send((test.id(), result))
def log_error(self, test, err, fn_name):
    """Log an error/failure and its formatted traceback to the current
    test case's logger.

    :param test: the test that raised (may be an _ErrorHolder for
        errors raised in setUpClass/tearDownClass)
    :param err: (type, value, traceback) triple as from sys.exc_info()
    :param fn_name: name of the unittest hook reporting the error
    """
    if self.current_test_case_info:
        if isinstance(test, unittest.suite._ErrorHolder):
            test_name = test.description
        else:
            test_name = '%s.%s(%s)' % (test.__class__.__name__,
                                       test._testMethodName,
                                       test._testMethodDoc)
        self.current_test_case_info.logger.debug(
            "--- %s() %s called, err is %s" %
            (fn_name, test_name, err))
        self.current_test_case_info.logger.debug(
            "formatted exception is:\n%s" %
            "".join(format_exception(*err)))
def add_error(self, test, err, unittest_fn, error_type):
    """Shared implementation of addFailure()/addError().

    Logs the error, invokes the underlying unittest recording function,
    builds the colored result string, creates the FAILED symlink and
    tracks core files found in the test's temp dir.  (The visible copy
    was truncated - the else:/raise branch, the error_type_str tuple
    element and the no-temp-dir else: are restored here.)

    :param test: the test that failed/errored
    :param err: sys.exc_info() triple
    :param unittest_fn: unittest.TestResult.addFailure or .addError
    :param error_type: FAIL or ERROR
    """
    if error_type == FAIL:
        self.log_error(test, err, 'addFailure')
        error_type_str = colorize("FAIL", RED)
    elif error_type == ERROR:
        self.log_error(test, err, 'addError')
        error_type_str = colorize("ERROR", RED)
    else:
        raise Exception('Error type %s cannot be used to record an '
                        'error or a failure' % error_type)

    unittest_fn(self, test, err)
    if self.current_test_case_info:
        self.result_string = "%s [ temp dir used by test case: %s ]" % \
                             (error_type_str,
                              self.current_test_case_info.tempdir)
        self.symlink_failed()
        self.failed_test_cases_info.add(self.current_test_case_info)
        if is_core_present(self.current_test_case_info.tempdir):
            if not self.current_test_case_info.core_crash_test:
                if isinstance(test, unittest.suite._ErrorHolder):
                    test_name = str(test)
                else:
                    test_name = "'{!s}' ({!s})".format(
                        get_testcase_doc_name(test), test.id())
                self.current_test_case_info.core_crash_test = test_name
            self.core_crash_test_cases_info.add(
                self.current_test_case_info)
    else:
        self.result_string = '%s [no temp dir]' % error_type_str

    self.send_result_through_pipe(test, error_type)
def addFailure(self, test, err):
    """Record a test failed result.

    :param test: the test that failed
    :param err: sys.exc_info() triple describing the failure
    """
    self.add_error(test, err, unittest.TestResult.addFailure, FAIL)
def addError(self, test, err):
    """Record a test error result.

    :param test: the test that raised an unexpected exception
    :param err: sys.exc_info() triple describing the error
    """
    self.add_error(test, err, unittest.TestResult.addError, ERROR)
def getDescription(self, test):
    """Return the description used for `test` in this run's output.

    :param test: the test to describe
    :returns: the test description string
    """
    return get_test_description(self.descriptions, test)
def startTest(self, test):
    """Indicate that a test is starting.

    Prints the (colorized) title of the test case class once per class
    and records the start time so stopTest() can report the duration.
    (The visible copy was truncated - the missing-docstring check, the
    YELLOW color assignment and the print_header(test) call are
    restored here.)

    :param test: the test being started
    """

    def print_header(test):
        # print the test case class title once, before its first test
        test_doc = getdoc(test)
        if not test_doc:
            raise Exception("No doc string for test '%s'" % test.id())
        test_title = test_doc.splitlines()[0]
        test_title_colored = colorize(test_title, GREEN)
        if test.force_solo():
            # long live PEP-8 and 80 char width limitation...
            c = YELLOW
            test_title_colored = colorize("SOLO RUN: " + test_title, c)

        if not hasattr(test.__class__, '_header_printed'):
            print(double_line_delim)
            print(test_title_colored)
            print(double_line_delim)
            test.__class__._header_printed = True

    print_header(test)
    self.start_test = time.time()
    unittest.TestResult.startTest(self, test)
    if self.verbosity > 0:
        self.stream.writeln(
            "Starting " + self.getDescription(test) + " ...")
        self.stream.writeln(single_line_delim)
def stopTest(self, test):
    """Called when the given test has been run.

    Prints the result (verbose: multi-line with delimiters, otherwise a
    single line including elapsed time) and forwards the TEST_RUN event
    through the result pipe.  (The else: of the verbosity branch was
    missing from the visible copy and is restored here.)

    :param test: the test that finished
    """
    unittest.TestResult.stopTest(self, test)

    if self.verbosity > 0:
        self.stream.writeln(single_line_delim)
        self.stream.writeln("%-73s%s" % (self.getDescription(test),
                                         self.result_string))
        self.stream.writeln(single_line_delim)
    else:
        self.stream.writeln("%-68s %4.2f %s" %
                            (self.getDescription(test),
                             time.time() - self.start_test,
                             self.result_string))

    self.send_result_through_pipe(test, TEST_RUN)
def printErrors(self):
    """Print the errors and failures accumulated during the run."""
    if self.errors or self.failures:
        self.stream.writeln()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)
    # ^^ that is the last output from unittest before summary
    if not self.runner.print_summary:
        # suppress the summary: route all further output to /dev/null
        devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
        self.stream = devnull
        self.runner.stream = devnull
def printErrorList(self, flavour, errors):
    """Print one error list together with its type label and each
    test's description.

    :param flavour: error type label ('ERROR' or 'FAIL')
    :param errors: iterable of (test, formatted traceback) tuples
    """
    for test, err in errors:
        description = self.getDescription(test)
        self.stream.writeln(double_line_delim)
        self.stream.writeln("%s: %s" % (flavour, description))
        self.stream.writeln(single_line_delim)
        self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
    """
    A basic test runner implementation which prints results to standard
    error.  (The visible copy was truncated - the @property decorator,
    the _makeResult() arguments and run()'s return are restored here.)
    """

    @property
    def resultclass(self):
        """Class maintaining the results of the tests"""
        return VppTestResult

    def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
                 result_pipe=None, failfast=False, buffer=False,
                 resultclass=None, print_summary=True, **kwargs):
        # ignore stream setting here, use hard-coded stdout to be in sync
        # with prints from VppTestCase methods ...
        super(VppTestRunner, self).__init__(sys.stdout, descriptions,
                                            verbosity, failfast, buffer,
                                            resultclass, **kwargs)
        KeepAliveReporter.pipe = keep_alive_pipe

        self.orig_stream = self.stream
        self.resultclass.test_framework_result_pipe = result_pipe

        self.print_summary = print_summary

    def _makeResult(self):
        # hand the runner itself to the result so it can silence the
        # summary output when print_summary is False
        return self.resultclass(self.stream,
                                self.descriptions,
                                self.verbosity,
                                self)

    def run(self, test):
        """Run the given test or test suite.

        :param test: test (suite) to run
        :returns: the populated result object
        """
        faulthandler.enable()  # emit stack trace to stderr if killed by signal

        result = super(VppTestRunner, self).run(test)
        if not self.print_summary:
            # restore the real streams that printErrors() redirected
            self.stream = self.orig_stream
            result.stream = self.orig_stream
        return result
class Worker(Thread):
    """Thread that runs an external executable, optionally under
    gdb/gdbserver, capturing its stdout/stderr and return code.
    (The visible copy was truncated - self.process/self.result
    initialization, the early returns in wait_for_enter() and the
    `def run(self):` line itself are restored here.)
    """

    def __init__(self, executable_args, logger, env=None, *args, **kwargs):
        super(Worker, self).__init__(*args, **kwargs)
        self.logger = logger
        self.args = executable_args
        # subclasses may set self.testcase before calling us to enable
        # the gdb/gdbserver debug wrappers
        if hasattr(self, 'testcase') and self.testcase.debug_all:
            if self.testcase.debug_gdbserver:
                self.args = ['/usr/bin/gdbserver', 'localhost:{port}'
                             .format(port=self.testcase.gdbserver_port)] + args
            elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'):
                self.args.append(self.wait_for_gdb)
        self.app_bin = executable_args[0]
        self.app_name = os.path.basename(self.app_bin)
        if hasattr(self, 'role'):
            self.app_name += ' {role}'.format(role=self.role)
        # populated by run()
        self.process = None
        self.result = None
        env = {} if env is None else env
        # deep copy so later mutation by the caller cannot leak in
        self.env = copy.deepcopy(env)

    def wait_for_enter(self):
        """When debugging, print attach instructions and block until the
        user presses ENTER; otherwise return immediately."""
        if not hasattr(self, 'testcase'):
            return
        if self.testcase.debug_all and self.testcase.debug_gdbserver:
            print()
            print(double_line_delim)
            print("Spawned GDB Server for '{app}' with PID: {pid}"
                  .format(app=self.app_name, pid=self.process.pid))
        elif self.testcase.debug_all and self.testcase.debug_gdb:
            print()
            print(double_line_delim)
            print("Spawned '{app}' with PID: {pid}"
                  .format(app=self.app_name, pid=self.process.pid))
        else:
            return
        print(single_line_delim)
        print("You can debug '{app}' using:".format(app=self.app_name))
        if self.testcase.debug_gdbserver:
            print("sudo gdb " + self.app_bin +
                  " -ex 'target remote localhost:{port}'"
                  .format(port=self.testcase.gdbserver_port))
            print("Now is the time to attach gdb by running the above "
                  "command, set up breakpoints etc., then resume from "
                  "within gdb by issuing the 'continue' command")
            self.testcase.gdbserver_port += 1
        elif self.testcase.debug_gdb:
            print("sudo gdb " + self.app_bin +
                  " -ex 'attach {pid}'".format(pid=self.process.pid))
            print("Now is the time to attach gdb by running the above "
                  "command and set up breakpoints etc., then resume from"
                  " within gdb by issuing the 'continue' command")
        print(single_line_delim)
        input("Press ENTER to continue running the testcase...")

    def run(self):
        executable = self.args[0]
        if not os.path.exists(executable) or not os.access(
                executable, os.F_OK | os.X_OK):
            # Exit code that means some system file did not exist,
            # could not be opened, or had some other kind of error.
            self.result = os.EX_OSFILE
            raise EnvironmentError(
                "executable '%s' is not found or executable." % executable)
        self.logger.debug("Running executable: '{app}'"
                          .format(app=' '.join(self.args)))
        env = os.environ.copy()
        env.update(self.env)
        env["CK_LOG_FILE_NAME"] = "-"
        self.process = subprocess.Popen(
            self.args, shell=False, env=env, preexec_fn=os.setpgrp,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.wait_for_enter()
        out, err = self.process.communicate()
        self.logger.debug("Finished running `{app}'".format(app=self.app_name))
        self.logger.info("Return code is `%s'" % self.process.returncode)
        self.logger.info(single_line_delim)
        self.logger.info("Executable `{app}' wrote to stdout:"
                         .format(app=self.app_name))
        self.logger.info(single_line_delim)
        self.logger.info(out.decode('utf-8'))
        self.logger.info(single_line_delim)
        self.logger.info("Executable `{app}' wrote to stderr:"
                         .format(app=self.app_name))
        self.logger.info(single_line_delim)
        self.logger.info(err.decode('utf-8'))
        self.logger.info(single_line_delim)
        self.result = self.process.returncode
1616 if __name__ == '__main__':