3 from __future__ import print_function
17 from collections import deque
18 from threading import Thread, Event
19 from inspect import getdoc, isclass
20 from traceback import format_exception
21 from logging import FileHandler, DEBUG, Formatter
24 from scapy.packet import Raw
25 import hook as hookmodule
26 from vpp_pg_interface import VppPGInterface
27 from vpp_sub_interface import VppSubInterface
28 from vpp_lo_interface import VppLoInterface
29 from vpp_bvi_interface import VppBviInterface
30 from vpp_papi_provider import VppPapiProvider
32 from vpp_papi.vpp_stats import VPPStats
33 from vpp_papi.vpp_transport_shmem import VppTransportShmemIOError
34 from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
36 from vpp_object import VppObjectRegistry
37 from util import ppp, is_core_present
38 from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
39 from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
40 from scapy.layers.inet6 import ICMPv6EchoReply
42 if os.name == 'posix' and sys.version_info[0] < 3:
43 # using subprocess32 is recommended by python official documentation
44 # @ https://docs.python.org/2/library/subprocess.html
45 import subprocess32 as subprocess
49 # Python2/3 compatible
class BoolEnvironmentVariable(object):
    """Boolean-like object backed by an environment variable.

    The environment is re-read on every truth test, so the value tracks
    changes made to os.environ at runtime.
    """

    def __init__(self, env_var_name, default='n', true_values=None):
        # env_var_name: variable consulted on each truth test
        # default: value assumed when the variable is unset
        # true_values: iterable of lower-case strings treated as "true";
        # NOTE(review): the default tuple on the continuation line is
        # elided in this view.
        self.name = env_var_name
        self.default = default
        self.true_values = true_values if true_values is not None else \

    # NOTE(review): the `def __bool__(self):` header is elided in this view.
        return os.getenv(self.name, self.default).lower() in self.true_values

    if sys.version_info[0] == 2:
        # Python 2 spells the truth-test hook __nonzero__
        __nonzero__ = __bool__

    # NOTE(review): the `def __repr__(self):` header is elided in this view.
        return 'BoolEnvironmentVariable(%r, default=%r, true_values=%r)' % \
            (self.name, self.default, self.true_values)


# module-level switch: truthy when TEST_DEBUG is set to a yes-like value
debug_framework = BoolEnvironmentVariable('TEST_DEBUG')
86 Test framework module.
88 The module provides a set of tools for constructing and running tests and
89 representing the results.
class VppDiedError(Exception):
    """ exception for reporting that the subprocess has died."""

    # map signal numbers back to their names (e.g. 11 -> 'SIGSEGV');
    # SIG_* pseudo-constants (SIG_DFL, SIG_IGN) are excluded on purpose
    signals_by_value = {v: k for k, v in signal.__dict__.items() if
                        k.startswith('SIG') and not k.startswith('SIG_')}

    def __init__(self, rv=None, testcase=None, method_name=None):
        # rv: Popen return code; negative values mean "killed by signal -rv"
        # testcase / method_name: where the death was noticed, for the message
        self.signal_name = None
        self.testcase = testcase
        self.method_name = method_name

        # NOTE(review): the enclosing `try:` line is elided in this view
            self.signal_name = VppDiedError.signals_by_value[-rv]
        except (KeyError, TypeError):
            # rv was None or not a recognised signal - leave signal_name unset

        if testcase is None and method_name is None:
        # NOTE(review): branch bodies partially elided; this line presumably
        # belongs to the else-branch that records the calling context
            in_msg = 'running %s.%s ' % (testcase, method_name)

        msg = "VPP subprocess died %sunexpectedly with return code: %d%s." % (
            ' [%s]' % (self.signal_name if
                       self.signal_name is not None else ''))
        super(VppDiedError, self).__init__(msg)
class _PacketInfo(object):
    """Private class to create packet info object.

    Help process information about the next packet.
    Set variables to default values.
    """
    # NOTE(review): the attribute default assignments are elided in this
    # view; only the descriptive comments remain.
    #: Store the index of the packet.
    #: Store the index of the source packet generator interface of the packet.
    #: Store the index of the destination packet generator interface
    #: Store expected ip version
    #: Store expected upper protocol
    #: Store the copy of the former packet.

    def __eq__(self, other):
        # two infos are equal when identity (index), both endpoints and the
        # payload data all agree
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 - confirm that is intended.
        index = self.index == other.index
        src = self.src == other.src
        dst = self.dst == other.dst
        data = self.data == other.data
        return index and src and dst and data
def pump_output(testclass):
    """ pump output from vpp stdout/stderr to proper queues """
    # NOTE(review): initialisation of stdout_fragment/stderr_fragment
    # (carried-over partial lines) is elided in this view.
    while not testclass.pump_thread_stop_flag.is_set():
        # block until vpp produces output or the wakeup pipe is written to;
        # the select() argument list continuation is elided in this view
        readable = select.select([testclass.vpp.stdout.fileno(),
                                  testclass.vpp.stderr.fileno(),
                                  testclass.pump_thread_wakeup_pipe[0]],
        if testclass.vpp.stdout.fileno() in readable:
            read = os.read(testclass.vpp.stdout.fileno(), 102400)
            # decode defensively - vpp output is not guaranteed clean ascii;
            # keep line endings so fragments can be re-joined
                split = read.decode('ascii',
                                    errors='backslashreplace').splitlines(True)
                if len(stdout_fragment) > 0:
                    # prepend the leftover partial line from the previous read
                    split[0] = "%s%s" % (stdout_fragment, split[0])
                if len(split) > 0 and split[-1].endswith("\n"):
                # NOTE(review): `limit` assignment / else branch elided;
                # an incomplete trailing line is carried over as a fragment
                    stdout_fragment = split[-1]
                testclass.vpp_stdout_deque.extend(split[:limit])
                if not testclass.cache_vpp_output:
                    for line in split[:limit]:
                        testclass.logger.info(
                            "VPP STDOUT: %s" % line.rstrip("\n"))
        if testclass.vpp.stderr.fileno() in readable:
            read = os.read(testclass.vpp.stderr.fileno(), 102400)
                split = read.decode('ascii',
                                    errors='backslashreplace').splitlines(True)
                if len(stderr_fragment) > 0:
                    split[0] = "%s%s" % (stderr_fragment, split[0])
                if len(split) > 0 and split[-1].endswith("\n"):
                # NOTE(review): `limit` assignment / else branch elided here too
                    stderr_fragment = split[-1]

                testclass.vpp_stderr_deque.extend(split[:limit])
                if not testclass.cache_vpp_output:
                    for line in split[:limit]:
                        testclass.logger.error(
                            "VPP STDERR: %s" % line.rstrip("\n"))
        # ignoring the dummy pipe here intentionally - the
        # flag will take care of properly terminating the loop
def _is_skip_aarch64_set():
    """Return the SKIP_AARCH64 toggle as a lazily-evaluated boolean.

    The returned BoolEnvironmentVariable re-reads the environment on
    every truth test.
    """
    skip_flag = BoolEnvironmentVariable('SKIP_AARCH64')
    return skip_flag


# module-level snapshot consumed by skip decorators elsewhere
is_skip_aarch64_set = _is_skip_aarch64_set()
206 def _is_platform_aarch64():
207 return platform.machine() == 'aarch64'
210 is_platform_aarch64 = _is_platform_aarch64()
def _running_extended_tests():
    """Return the EXTENDED_TESTS toggle as a lazily-evaluated boolean."""
    extended = BoolEnvironmentVariable("EXTENDED_TESTS")
    return extended


# module-level snapshot consumed by skip decorators elsewhere
running_extended_tests = _running_extended_tests()
def _running_gcov_tests():
    """Return the GCOV_TESTS toggle as a lazily-evaluated boolean."""
    gcov = BoolEnvironmentVariable("GCOV_TESTS")
    return gcov


# module-level snapshot consumed by skip decorators elsewhere
running_gcov_tests = _running_gcov_tests()
227 def _running_on_centos():
228 os_id = os.getenv("OS_ID", "")
229 return True if "centos" in os_id.lower() else False
232 running_on_centos = _running_on_centos()
class KeepAliveReporter(object):
    """
    Singleton object which reports test start to parent process
    """
    # NOTE(review): __init__ header and the shared-state declaration are
    # elided in this view; all instances appear to share one __dict__
    # (Borg pattern) - confirm.
        self.__dict__ = self._shared_state

    # NOTE(review): the property getter for `pipe` is elided; this is the
    # setter, which only allows the pipe to be assigned once.
    def pipe(self, pipe):
        if self._pipe is not None:
            raise Exception("Internal error - pipe should only be set once.")

    def send_keep_alive(self, test, desc=None):
        """
        Write current test tmpdir & desc to keep-alive pipe to signal liveness
        """
        if self.pipe is None:
            # if not running forked..
        # NOTE(review): the desc-derivation branches are partially elided here
            desc = '%s (%s)' % (desc, unittest.util.strclass(test))

        self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
class VppTestCase(unittest.TestCase):
    """This subclass is a base class for VPP test cases that are implemented as
    classes. It provides methods to create and run test case.
    """

    # extra vpp configuration fragments appended to the command line;
    # NOTE(review): these are mutable class-level lists - subclasses should
    # reassign rather than mutate them in place.
    extra_vpp_punt_config = []
    extra_vpp_plugin_config = []
    # seconds the VPP API client waits for a response
    vapi_response_timeout = 5
    # NOTE(review): a decorator line (presumably @property) is elided above.
    def packet_infos(self):
        """List of packet infos"""
        return self._packet_infos

    def get_packet_count_for_if_idx(cls, dst_if_index):
        """Get the number of packet info for specified destination if index"""
        # NOTE(review): the fallback (presumably `return 0`) is elided
        if dst_if_index in cls._packet_count_for_dst_if_idx:
            return cls._packet_count_for_dst_if_idx[dst_if_index]

    # NOTE(review): the enclosing method header is elided in this view.
        """Return the instance of this testcase"""
        return cls.test_instance

    def set_debug_flags(cls, d):
        # Parse the DEBUG environment value `d` into per-class debug flags.
        cls.gdbserver_port = 7777
        cls.debug_core = False
        cls.debug_gdb = False
        cls.debug_gdbserver = False
        cls.debug_all = False
        # NOTE(review): the loop/branches over the DEBUG value are partially
        # elided in this view.
            cls.debug_core = True
        elif dl == "gdb" or dl == "gdb-all":
        elif dl == "gdbserver" or dl == "gdbserver-all":
            cls.debug_gdbserver = True
            raise Exception("Unrecognized DEBUG option: '%s'" % d)
        if dl == "gdb-all" or dl == "gdbserver-all":

    def get_least_used_cpu():
        # Bucket CPUs by how many vpp_main processes currently run on them;
        # bucket 0 starts with every CPU ("unused").
        cpu_usage_list = [set(range(psutil.cpu_count()))]
        vpp_processes = [p for p in psutil.process_iter(attrs=['pid', 'name'])
                         if 'vpp_main' == p.info['name']]
        for vpp_process in vpp_processes:
            for cpu_usage_set in cpu_usage_list:
                # cpu_num() can race with process exit - hence the
                # NoSuchProcess handler below
                cpu_num = vpp_process.cpu_num()
                if cpu_num in cpu_usage_set:
                    # move this cpu one bucket up (= one more process on it)
                    cpu_usage_set_index = cpu_usage_list.index(
                    if cpu_usage_set_index == len(cpu_usage_list) - 1:
                        cpu_usage_list.append({cpu_num})
                        cpu_usage_list[cpu_usage_set_index + 1].add(
                    cpu_usage_set.remove(cpu_num)
            except psutil.NoSuchProcess:
        # pick randomly from the least-loaded non-empty bucket
        for cpu_usage_set in cpu_usage_list:
            if len(cpu_usage_set) > 0:
                min_usage_set = cpu_usage_set

        return random.choice(tuple(min_usage_set))
    def setUpConstants(cls):
        """ Set-up the test case class based on environment variables """
        cls.step = BoolEnvironmentVariable('STEP')
        d = os.getenv("DEBUG", None)
        # inverted case to handle '' == True
        c = os.getenv("CACHE_OUTPUT", "1")
        cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
        cls.set_debug_flags(d)
        cls.vpp_bin = os.getenv('VPP_BIN', "vpp")
        cls.plugin_path = os.getenv('VPP_PLUGIN_PATH')
        cls.test_plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
        cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
        # combine built-in and external plugin paths when both are present
        if cls.plugin_path is not None:
            if cls.extern_plugin_path is not None:
                plugin_path = "%s:%s" % (
                    cls.plugin_path, cls.extern_plugin_path)
            # NOTE(review): else branch header elided in this view
                plugin_path = cls.plugin_path
        elif cls.extern_plugin_path is not None:
            plugin_path = cls.extern_plugin_path
        # interactive debugging wants a CLI socket to attach to
        if cls.step or cls.debug_gdb or cls.debug_gdbserver:
            debug_cli = "cli-listen localhost:5002"
        size = os.getenv("COREDUMP_SIZE")
        # NOTE(review): guard for a set COREDUMP_SIZE elided in this view
            coredump_size = "coredump-size %s" % size
        if coredump_size is None:
            coredump_size = "coredump-size unlimited"
        # spread concurrently running vpp instances over the machine
        cpu_core_number = cls.get_least_used_cpu()
        if not hasattr(cls, "worker_config"):
            cls.worker_config = ""

        default_variant = os.getenv("VARIANT")
        if default_variant is not None:
            default_variant = "defaults { %s 100 }" % default_variant

        api_fuzzing = os.getenv("API_FUZZ")
        if api_fuzzing is None:
        # NOTE(review): default assignment for api_fuzzing elided

        cls.vpp_cmdline = [cls.vpp_bin, "unix",
                           "{", "nodaemon", debug_cli, "full-coredump",
                           coredump_size, "runtime-dir", cls.tempdir, "}",
                           "api-trace", "{", "on", "}", "api-segment", "{",
                           "prefix", cls.shm_prefix, "}", "cpu", "{",
                           "main-core", str(cpu_core_number),
                           cls.worker_config, "}",
                           "physmem", "{", "max-size", "32m", "}",
                           "statseg", "{", "socket-name", cls.stats_sock, "}",
                           "socksvr", "{", "socket-name", cls.api_sock, "}",
                           "node { ", default_variant, "}",
                           "api-fuzz {", api_fuzzing, "}",
                           "{", "plugin", "dpdk_plugin.so", "{", "disable",
                           "}", "plugin", "rdma_plugin.so", "{", "disable",
                           "}", "plugin", "unittest_plugin.so", "{", "enable",
                           "}"] + cls.extra_vpp_plugin_config + ["}", ]

        if cls.extra_vpp_punt_config is not None:
            cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
        if plugin_path is not None:
            cls.vpp_cmdline.extend(["plugin_path", plugin_path])
        if cls.test_plugin_path is not None:
            cls.vpp_cmdline.extend(["test_plugin_path", cls.test_plugin_path])

        cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
        cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
    def wait_for_enter(cls):
        # Interactive debug support: print gdb attach instructions and block
        # until the user presses ENTER.
        if cls.debug_gdbserver:
            print(double_line_delim)
            print("Spawned GDB server with PID: %d" % cls.vpp.pid)
        # NOTE(review): elif/else branch headers elided in this view
            print(double_line_delim)
            print("Spawned VPP with PID: %d" % cls.vpp.pid)
            cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
        print(single_line_delim)
        print("You can debug VPP using:")
        if cls.debug_gdbserver:
            print("sudo gdb " + cls.vpp_bin +
                  " -ex 'target remote localhost:{port}'"
                  .format(port=cls.gdbserver_port))
            print("Now is the time to attach gdb by running the above "
                  "command, set up breakpoints etc., then resume VPP from "
                  "within gdb by issuing the 'continue' command")
            # bump the port so the next spawned gdbserver gets its own one
            cls.gdbserver_port += 1
        # NOTE(review): the gdb (non-server) branch header is elided here
            print("sudo gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
            print("Now is the time to attach gdb by running the above "
                  "command and set up breakpoints etc., then resume VPP from"
                  " within gdb by issuing the 'continue' command")
        print(single_line_delim)
        input("Press ENTER to continue running the testcase...")

    # NOTE(review): the enclosing method header (spawning vpp) is elided.
        cmdline = cls.vpp_cmdline

        if cls.debug_gdbserver:
            gdbserver = '/usr/bin/gdbserver'
            # refuse to continue with a half-working debug setup
            if not os.path.isfile(gdbserver) or \
                    not os.access(gdbserver, os.X_OK):
                raise Exception("gdbserver binary '%s' does not exist or is "
                                "not executable" % gdbserver)

            cmdline = [gdbserver, 'localhost:{port}'
                       .format(port=cls.gdbserver_port)] + cls.vpp_cmdline
            cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))

        # NOTE(review): `try:` elided; spawn vpp with piped output - the
        # pump_output thread drains the pipes
            cls.vpp = subprocess.Popen(cmdline,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as e:
            cls.logger.critical("Subprocess returned with non-0 return code: ("
        # NOTE(review): OSError handler header elided in this view
            cls.logger.critical("Subprocess returned with OS error: "
                                "(%s) %s", e.errno, e.strerror)
        except Exception as e:
            cls.logger.exception("Subprocess returned unexpected from "

    def wait_for_coredump(cls):
        # If a core file exists, wait (up to 60s) for its size to stop
        # changing before letting teardown kill the process.
        corefile = cls.tempdir + "/core"
        if os.path.isfile(corefile):
            cls.logger.error("Waiting for coredump to complete: %s", corefile)
            curr_size = os.path.getsize(corefile)
            deadline = time.time() + 60
            # NOTE(review): the sampling loop body is partially elided;
            # `size` is presumably the previous sample - confirm
            while time.time() < deadline:
                curr_size = os.path.getsize(corefile)
                if size == curr_size:
            # timed-out branch
                cls.logger.error("Timed out waiting for coredump to complete:"
            # settled branch
                cls.logger.error("Coredump complete: %s, size %d",
    # NOTE(review): the `def setUpClass(cls):` header (and its decorator)
    # are elided in this view.
        """
        Perform class setup before running the testcase
        Remove shared memory files, start vpp and connect the vpp-api
        """
        super(VppTestCase, cls).setUpClass()
        gc.collect()  # run garbage collection first
        cls.logger = get_logger(cls.__name__)
        seed = os.environ["RND_SEED"]
        if hasattr(cls, 'parallel_handler'):
            cls.logger.addHandler(cls.parallel_handler)
            # parent process handles the records; avoid duplicate output
            cls.logger.propagate = False

        cls.tempdir = tempfile.mkdtemp(
            prefix='vpp-unittest-%s-' % cls.__name__)
        cls.stats_sock = "%s/stats.sock" % cls.tempdir
        cls.api_sock = "%s/api.sock" % cls.tempdir
        cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
        cls.file_handler.setFormatter(
            Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
        cls.file_handler.setLevel(DEBUG)
        cls.logger.addHandler(cls.file_handler)
        cls.logger.debug("--- setUpClass() for %s called ---" %
        # shm prefix is derived from the unique tempdir name
        cls.shm_prefix = os.path.basename(cls.tempdir)
        os.chdir(cls.tempdir)
        cls.logger.info("Temporary dir is %s, shm prefix is %s",
                        cls.tempdir, cls.shm_prefix)
        cls.logger.debug("Random seed is %s" % seed)
        cls.reset_packet_infos()
        cls.registry = VppObjectRegistry()
        cls.vpp_startup_failed = False
        cls.reporter = KeepAliveReporter()
        # need to catch exceptions here because if we raise, then the cleanup
        # doesn't get called and we might end with a zombie vpp
        cls.reporter.send_keep_alive(cls, 'setUpClass')
        VppTestResult.current_test_case_info = TestCaseInfo(
            cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
        cls.vpp_stdout_deque = deque()
        cls.vpp_stderr_deque = deque()
        cls.pump_thread_stop_flag = Event()
        cls.pump_thread_wakeup_pipe = os.pipe()
        cls.pump_thread = Thread(target=pump_output, args=(cls,))
        cls.pump_thread.daemon = True
        cls.pump_thread.start()
        # with a debugger attached there is no point in timing out API calls
        if cls.debug_gdb or cls.debug_gdbserver:
            cls.vapi_response_timeout = 0
        cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls,
                                   cls.vapi_response_timeout)
        # NOTE(review): step-mode branch headers elided; StepHook for
        # interactive stepping, PollHook to detect vpp death otherwise
            hook = hookmodule.StepHook(cls)
            hook = hookmodule.PollHook(cls)
        cls.vapi.register_hook(hook)
        cls.statistics = VPPStats(socketname=cls.stats_sock)
        # NOTE(review): connect/except scaffolding elided in this view
            cls.vpp_startup_failed = True
            "VPP died shortly after startup, check the"
            " output to standard error for possible cause")
        except vpp_papi.VPPIOError as e:
            cls.logger.debug("Exception connecting to vapi: %s" % e)
            cls.vapi.disconnect()

            if cls.debug_gdbserver:
                print(colorize("You're running VPP inside gdbserver but "
                               "VPP-API connection failed, did you forget "
                               "to 'continue' VPP from within gdb?", RED))
        except vpp_papi.VPPRuntimeError as e:
            cls.logger.debug("%s" % e)
        except Exception as e:
            cls.logger.debug("Exception connecting to VPP: %s" % e)
    def _debug_quit(cls):
        # When running under gdb/gdbserver, give the user a chance to poke
        # at the still-running process before the framework kills it.
        if (cls.debug_gdbserver or cls.debug_gdb):
            # NOTE(review): `try:` line elided in this view
                if cls.vpp.returncode is None:
                    print(double_line_delim)
                    print("VPP or GDB server is still running")
                    print(single_line_delim)
                    input("When done debugging, press ENTER to kill the "
                          "process and finish running the testcase...")
            except AttributeError:
                # cls.vpp was never created - nothing to wait for

    # NOTE(review): the `def quit(cls):` header is elided in this view.
        """
        Disconnect vpp-api, kill vpp and cleanup shared memory files
        """
        # first signal that we want to stop the pump thread, then wake it up
        if hasattr(cls, 'pump_thread_stop_flag'):
            cls.pump_thread_stop_flag.set()
        if hasattr(cls, 'pump_thread_wakeup_pipe'):
            os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up')
        if hasattr(cls, 'pump_thread'):
            cls.logger.debug("Waiting for pump thread to stop")
            cls.pump_thread.join()
        if hasattr(cls, 'vpp_stderr_reader_thread'):
            cls.logger.debug("Waiting for stderr pump to stop")
            cls.vpp_stderr_reader_thread.join()

        if hasattr(cls, 'vpp'):
            if hasattr(cls, 'vapi'):
                cls.logger.debug(cls.vapi.vpp.get_stats())
                cls.logger.debug("Disconnecting class vapi client on %s",
                cls.vapi.disconnect()
                cls.logger.debug("Deleting class vapi attribute on %s",
            # let a coredump finish before terminating the process
            if cls.vpp.returncode is None:
                cls.wait_for_coredump()
                cls.logger.debug("Sending TERM to vpp")
                # NOTE(review): the terminate() call is elided in this view
                cls.logger.debug("Waiting for vpp to die")
                cls.vpp.communicate()
            cls.logger.debug("Deleting class vpp attribute on %s",

        # startup failures escalate stderr to critical in the log dump below
        if cls.vpp_startup_failed:
            stdout_log = cls.logger.info
            stderr_log = cls.logger.critical
        # NOTE(review): else-branch header elided in this view
            stdout_log = cls.logger.info
            stderr_log = cls.logger.info

        if hasattr(cls, 'vpp_stdout_deque'):
            stdout_log(single_line_delim)
            stdout_log('VPP output to stdout while running %s:', cls.__name__)
            stdout_log(single_line_delim)
            vpp_output = "".join(cls.vpp_stdout_deque)
            with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
            stdout_log('\n%s', vpp_output)
            stdout_log(single_line_delim)

        if hasattr(cls, 'vpp_stderr_deque'):
            stderr_log(single_line_delim)
            stderr_log('VPP output to stderr while running %s:', cls.__name__)
            stderr_log(single_line_delim)
            vpp_output = "".join(cls.vpp_stderr_deque)
            with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
            stderr_log('\n%s', vpp_output)
            stderr_log(single_line_delim)

    def tearDownClass(cls):
        """ Perform final cleanup after running all tests in this test-case """
        cls.logger.debug("--- tearDownClass() for %s called ---" %
        cls.reporter.send_keep_alive(cls, 'tearDownClass')
        cls.file_handler.close()
        cls.reset_packet_infos()
        debug_internal.on_tear_down_class(cls)
693 def show_commands_at_teardown(self):
694 """ Allow subclass specific teardown logging additions."""
695 self.logger.info("--- No test specific show commands provided. ---")
698 """ Show various debug prints after each test """
699 self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
700 (self.__class__.__name__, self._testMethodName,
701 self._testMethodDoc))
704 if not self.vpp_dead:
705 self.logger.debug(self.vapi.cli("show trace max 1000"))
706 self.logger.info(self.vapi.ppcli("show interface"))
707 self.logger.info(self.vapi.ppcli("show hardware"))
708 self.logger.info(self.statistics.set_errors_str())
709 self.logger.info(self.vapi.ppcli("show run"))
710 self.logger.info(self.vapi.ppcli("show log"))
711 self.logger.info(self.vapi.ppcli("show bihash"))
712 self.logger.info("Logging testcase specific show commands.")
713 self.show_commands_at_teardown()
714 self.registry.remove_vpp_config(self.logger)
715 # Save/Dump VPP api trace log
716 m = self._testMethodName
717 api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
718 tmp_api_trace = "/tmp/%s" % api_trace
719 vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
720 self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
721 self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
723 os.rename(tmp_api_trace, vpp_api_trace_log)
724 self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
726 except VppTransportShmemIOError:
727 self.logger.debug("VppTransportShmemIOError: Vpp dead. "
728 "Cannot log show commands.")
731 self.registry.unregister_all(self.logger)
734 """ Clear trace before running each test"""
735 super(VppTestCase, self).setUp()
736 self.reporter.send_keep_alive(self)
739 raise VppDiedError(rv=None, testcase=self.__class__.__name__,
740 method_name=self._testMethodName)
741 self.sleep(.1, "during setUp")
742 self.vpp_stdout_deque.append(
743 "--- test setUp() for %s.%s(%s) starts here ---\n" %
744 (self.__class__.__name__, self._testMethodName,
745 self._testMethodDoc))
746 self.vpp_stderr_deque.append(
747 "--- test setUp() for %s.%s(%s) starts here ---\n" %
748 (self.__class__.__name__, self._testMethodName,
749 self._testMethodDoc))
750 self.vapi.cli("clear trace")
751 # store the test instance inside the test class - so that objects
752 # holding the class can access instance methods (like assertEqual)
753 type(self).test_instance = self
    def pg_enable_capture(cls, interfaces=None):
        """
        Enable capture on packet-generator interfaces

        :param interfaces: iterable interface indexes (if None,
                           use self.pg_interfaces)
        """
        if interfaces is None:
            interfaces = cls.pg_interfaces
        # NOTE(review): the loop enabling capture per interface is elided

    def register_capture(cls, cap_name):
        """ Register a capture in the testclass """
        # add to the list of captures with current timestamp
        cls._captures.append((time.time(), cap_name))

    def get_vpp_time(cls):
        # processes e.g. "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
        # returns float("2.190522")
        timestr = cls.vapi.cli('show clock')
        head, sep, tail = timestr.partition(',')
        head, sep, tail = head.partition('Time now')
        # NOTE(review): the conversion/return of the parsed value is elided

    def sleep_on_vpp_time(cls, sec):
        """ Sleep according to time in VPP world """
        # On a busy system with many processes
        # we might end up with VPP time being slower than real world
        # So take that into account when waiting for VPP to do something
        start_time = cls.get_vpp_time()
        while cls.get_vpp_time() - start_time < sec:
        # NOTE(review): the loop body (a short sleep) is elided in this view

    # NOTE(review): the `def pg_start(cls):` header is elided in this view.
        """ Enable the PG, wait till it is done, then clean up """
        cls.vapi.cli("trace add pg-input 1000")
        cls.vapi.cli('packet-generator enable')
        # PG, when starts, runs to completion -
        # so let's avoid a race condition,
        # and wait a little till it's done.
        # Then clean it up - and then be gone.
        deadline = time.time() + 300
        while cls.vapi.cli('show packet-generator').find("Yes") != -1:
            cls.sleep(0.01)  # yield
            if time.time() > deadline:
                cls.logger.error("Timeout waiting for pg to stop")
        for stamp, cap_name in cls._captures:
            cls.vapi.cli('packet-generator delete %s' % cap_name)

    def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
        """
        Create packet-generator interfaces.

        :param interfaces: iterable indexes of the interfaces.
        :returns: List of created interfaces.
        """
        # NOTE(review): result-list initialisation / loop header elided
            intf = VppPGInterface(cls, i, gso, gso_size)
            # publish each interface as a class attribute (e.g. cls.pg0)
            setattr(cls, intf.name, intf)
        cls.pg_interfaces = result

    def create_loopback_interfaces(cls, count):
        """
        Create loopback interfaces.

        :param count: number of interfaces created.
        :returns: List of created interfaces.
        """
        result = [VppLoInterface(cls) for i in range(count)]
        # NOTE(review): loop header elided; each interface becomes a class
        # attribute, mirroring create_pg_interfaces
            setattr(cls, intf.name, intf)
        cls.lo_interfaces = result

    def create_bvi_interfaces(cls, count):
        """
        Create BVI interfaces.

        :param count: number of interfaces created.
        :returns: List of created interfaces.
        """
        result = [VppBviInterface(cls) for i in range(count)]
        # NOTE(review): loop header elided, same pattern as above
            setattr(cls, intf.name, intf)
        cls.bvi_interfaces = result

    def extend_packet(packet, size, padding=' '):
        """
        Extend packet to given size by padding with spaces or custom padding
        NOTE: Currently works only when Raw layer is present.

        :param packet: packet
        :param size: target size
        :param padding: padding used to extend the payload
        """
        # NOTE(review): the +4 presumably accounts for bytes appended on the
        # wire (CRC) - confirm
        packet_len = len(packet) + 4
        extend = size - packet_len
        # NOTE(review): guard for positive `extend` elided in this view
            num = (extend // len(padding)) + 1
            packet[Raw].load += (padding * num)[:extend].encode("ascii")
    def reset_packet_infos(cls):
        """ Reset the list of packet info objects and packet counts to zero """
        cls._packet_infos = {}
        cls._packet_count_for_dst_if_idx = {}

    def create_packet_info(cls, src_if, dst_if):
        """
        Create packet info object containing the source and destination indexes
        and add it to the testcase's packet info list

        :param VppInterface src_if: source interface
        :param VppInterface dst_if: destination interface

        :returns: _PacketInfo object
        """
        # NOTE(review): the _PacketInfo construction is elided in this view
        info.index = len(cls._packet_infos)
        info.src = src_if.sw_if_index
        info.dst = dst_if.sw_if_index
        # count against the parent for sub-interfaces
        if isinstance(dst_if, VppSubInterface):
            dst_idx = dst_if.parent.sw_if_index
        # NOTE(review): else-branch header elided
            dst_idx = dst_if.sw_if_index
        if dst_idx in cls._packet_count_for_dst_if_idx:
            cls._packet_count_for_dst_if_idx[dst_idx] += 1
        # NOTE(review): else-branch header elided
            cls._packet_count_for_dst_if_idx[dst_idx] = 1
        cls._packet_infos[info.index] = info
        # NOTE(review): `return info` elided in this view

    def info_to_payload(info):
        """
        Convert _PacketInfo object to packet payload

        :param info: _PacketInfo object

        :returns: string containing serialized data from packet info
        """
        # five space-separated ints; the trailing fields (ip, proto) are on
        # a continuation line elided in this view
        return "%d %d %d %d %d" % (info.index, info.src, info.dst,

    def payload_to_info(payload, payload_field='load'):
        """
        Convert packet payload to _PacketInfo object

        :param payload: packet payload
        :type payload:  <class 'scapy.packet.Raw'>
        :param payload_field: packet fieldname of payload "load" for
                              <class 'scapy.packet.Raw'>
        :type payload_field: str
        :returns: _PacketInfo object containing de-serialized data from payload
        """
        # inverse of info_to_payload: parse five space-separated ints
        numbers = getattr(payload, payload_field).split()
        # NOTE(review): _PacketInfo construction and return are elided
        info.index = int(numbers[0])
        info.src = int(numbers[1])
        info.dst = int(numbers[2])
        info.ip = int(numbers[3])
        info.proto = int(numbers[4])

    def get_next_packet_info(self, info):
        """
        Iterate over the packet info list stored in the testcase
        Start iteration with first element if info is None
        Continue based on index in info if info is specified

        :param info: info or None
        :returns: next info in list or None if no more infos
        """
        # NOTE(review): the info-is-None branch is elided in this view
            next_index = info.index + 1
        # past the end of the list -> no more infos
        if next_index == len(self._packet_infos):
        # NOTE(review): `return None` / else-branch headers elided
            return self._packet_infos[next_index]

    def get_next_packet_info_for_interface(self, src_index, info):
        """
        Search the packet info list for the next packet info with same source
        interface index

        :param src_index: source interface index to search for
        :param info: packet info - where to start the search
        :returns: packet info or None
        """
        # NOTE(review): loop header and termination handling elided;
        # linear scan until the source interface index matches
            info = self.get_next_packet_info(info)
            if info.src == src_index:

    def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
        """
        Search the packet info list for the next packet info with same source
        and destination interface indexes

        :param src_index: source interface index to search for
        :param dst_index: destination interface index to search for
        :param info: packet info - where to start the search
        :returns: packet info or None
        """
        # NOTE(review): loop header and termination handling elided
            info = self.get_next_packet_info_for_interface(src_index, info)
            if info.dst == dst_index:

    def assert_equal(self, real_value, expected_value, name_or_class=None):
        # Without a name/class hint fall back to plain assertEqual.
        if name_or_class is None:
            self.assertEqual(real_value, expected_value)
        # NOTE(review): `return` and `try:` lines elided; the first message
        # form assumes name_or_class is an enum-like callable with a docstring
            msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
            msg = msg % (getdoc(name_or_class).strip(),
                         real_value, str(name_or_class(real_value)),
                         expected_value, str(name_or_class(expected_value)))
        # NOTE(review): except-branch header elided (plain-name fallback)
            msg = "Invalid %s: %s does not match expected value %s" % (
                name_or_class, real_value, expected_value)

        self.assertEqual(real_value, expected_value, msg)

    def assert_in_range(self,
    # NOTE(review): the remaining parameters of the signature are elided.
    # Asserts expected_min <= real_value <= expected_max with a readable msg.
        msg = "Invalid %s: %s out of range <%s,%s>" % (
            name, real_value, expected_min, expected_max)
        self.assertTrue(expected_min <= real_value <= expected_max, msg)

    def assert_packet_checksums_valid(self, packet,
                                      ignore_zero_udp_checksums=True):
        # Re-serialize the packet and verify each checksum field survives
        # recalculation; zero UDP checksums are optionally skipped.
        received = packet.__class__(scapy.compat.raw(packet))
        udp_layers = ['UDP', 'UDPerror']
        checksum_fields = ['cksum', 'chksum']
        # NOTE(review): checksums/counter initialisation elided in this view
        temp = received.__class__(scapy.compat.raw(received))
        # NOTE(review): the layer-walk loop header is elided
            layer = temp.getlayer(counter)
            # NOTE(review): the layer presence guard is elided
                layer = layer.copy()
                layer.remove_payload()
                for cf in checksum_fields:
                    if hasattr(layer, cf):
                        if ignore_zero_udp_checksums and \
                                0 == getattr(layer, cf) and \
                                layer.name in udp_layers:
                        # NOTE(review): `continue` elided here
                        # drop the field so scapy recomputes it on rebuild
                        delattr(temp.getlayer(counter), cf)
                        checksums.append((counter, cf))
            counter = counter + 1
        if 0 == len(checksums):
        # NOTE(review): early return elided - nothing to verify
        temp = temp.__class__(scapy.compat.raw(temp))
        for layer, cf in checksums:
            calc_sum = getattr(temp[layer], cf)
            # NOTE(review): the assert_equal call header is elided
                getattr(received[layer], cf), calc_sum,
                "packet checksum on layer #%d: %s" % (layer, temp[layer].name))
            # NOTE(review): the logger call header is elided
                "Checksum field `%s` on `%s` layer has correct value `%s`" %
                (cf, temp[layer].name, calc_sum))

    def assert_checksum_valid(self, received_packet, layer,
                              field_name='chksum',
                              ignore_zero_checksum=False):
        """ Check checksum of received packet on given layer """
        received_packet_checksum = getattr(received_packet[layer], field_name)
        if ignore_zero_checksum and 0 == received_packet_checksum:
        # NOTE(review): early return elided in this view
        recalculated = received_packet.__class__(
            scapy.compat.raw(received_packet))
        # remove the field, rebuild, and let scapy fill in the checksum
        delattr(recalculated[layer], field_name)
        recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
        self.assert_equal(received_packet_checksum,
                          getattr(recalculated[layer], field_name),
                          "packet checksum on layer: %s" % layer)
1073 def assert_ip_checksum_valid(self, received_packet,
1074 ignore_zero_checksum=False):
1075 self.assert_checksum_valid(received_packet, 'IP',
1076 ignore_zero_checksum=ignore_zero_checksum)
1078 def assert_tcp_checksum_valid(self, received_packet,
1079 ignore_zero_checksum=False):
1080 self.assert_checksum_valid(received_packet, 'TCP',
1081 ignore_zero_checksum=ignore_zero_checksum)
1083 def assert_udp_checksum_valid(self, received_packet,
1084 ignore_zero_checksum=True):
1085 self.assert_checksum_valid(received_packet, 'UDP',
1086 ignore_zero_checksum=ignore_zero_checksum)
1088 def assert_embedded_icmp_checksum_valid(self, received_packet):
1089 if received_packet.haslayer(IPerror):
1090 self.assert_checksum_valid(received_packet, 'IPerror')
1091 if received_packet.haslayer(TCPerror):
1092 self.assert_checksum_valid(received_packet, 'TCPerror')
1093 if received_packet.haslayer(UDPerror):
1094 self.assert_checksum_valid(received_packet, 'UDPerror',
1095 ignore_zero_checksum=True)
1096 if received_packet.haslayer(ICMPerror):
1097 self.assert_checksum_valid(received_packet, 'ICMPerror')
1099 def assert_icmp_checksum_valid(self, received_packet):
1100 self.assert_checksum_valid(received_packet, 'ICMP')
1101 self.assert_embedded_icmp_checksum_valid(received_packet)
1103 def assert_icmpv6_checksum_valid(self, pkt):
1104 if pkt.haslayer(ICMPv6DestUnreach):
1105 self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
1106 self.assert_embedded_icmp_checksum_valid(pkt)
1107 if pkt.haslayer(ICMPv6EchoRequest):
1108 self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
1109 if pkt.haslayer(ICMPv6EchoReply):
1110 self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
    def get_packet_counter(self, counter):
        # Fetch a counter value either from the stats segment (names that
        # start with "/") or by scraping `sh errors` CLI output.
        if counter.startswith("/"):
            counter_value = self.statistics.get_counter(counter)
        # NOTE(review): else-branch header elided in this view
            counters = self.vapi.cli("sh errors").split('\n')
            # skip the header line and the trailing line of the CLI output
            for i in range(1, len(counters) - 1):
                results = counters[i].split()
                if results[1] == counter:
                    counter_value = int(results[0])
                    # NOTE(review): loop-exit (break) elided in this view
        return counter_value
1125 def assert_packet_counter_equal(self, counter, expected_value):
1126 counter_value = self.get_packet_counter(counter)
1127 self.assert_equal(counter_value, expected_value,
1128 "packet counter `%s'" % counter)
1130 def assert_error_counter_equal(self, counter, expected_value):
1131 counter_value = self.statistics.get_err_counter(counter)
1132 self.assert_equal(counter_value, expected_value,
1133 "error counter `%s'" % counter)
    def sleep(cls, timeout, remark=None):
        # Sleep for `timeout` seconds and log an error if the wall-clock
        # sleep took much longer than requested (busy test host).
        # /* Allow sleep(0) to maintain win32 semantics, and as decreed
        #  * by Guido, only the main thread can be interrupted.
        # */
        # https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892  # noqa
        if hasattr(os, 'sched_yield'):
        # NOTE(review): yield/sleep dispatch for timeout == 0 is elided
        if hasattr(cls, 'logger'):
            cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
        before = time.time()
        # NOTE(review): the actual time.sleep() call and the `after`
        # timestamp are elided in this view
        if hasattr(cls, 'logger') and after - before > 2 * timeout:
            cls.logger.error("unexpected self.sleep() result - "
                             "slept for %es instead of ~%es!",
                             after - before, timeout)

        if hasattr(cls, 'logger'):
        # NOTE(review): the debug-call header is elided in this view
                "Finished sleep (%s) - slept %es (wanted %es)",
                remark, after - before, timeout)

    def pg_send(self, intf, pkts, worker=None):
        # Clear trace, enqueue pkts on intf, arm captures; the pg_start
        # call is elided in this view.
        self.vapi.cli("clear trace")
        intf.add_stream(pkts, worker=worker)
        self.pg_enable_capture(self.pg_interfaces)

    def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
        # Send pkts and verify that no pg interface captured anything.
        self.pg_send(intf, pkts)
        # NOTE(review): default timeout handling is elided in this view
        for i in self.pg_interfaces:
            i.get_capture(0, timeout=timeout)
            i.assert_nothing_captured(remark=remark)

    def send_and_expect(self, intf, pkts, output, n_rx=None, worker=None):
        # NOTE(review): n_rx defaulting (presumably len(pkts)) and the
        # `return rx` line are elided in this view
        self.pg_send(intf, pkts, worker=worker)
        rx = output.get_capture(n_rx)

    def send_and_expect_only(self, intf, pkts, output, timeout=None):
        # Send pkts, expect all of them on `output` and nothing anywhere else.
        self.pg_send(intf, pkts)
        rx = output.get_capture(len(pkts))
        # NOTE(review): the `outputs` list construction and `return rx`
        # are elided in this view
        for i in self.pg_interfaces:
            if i not in outputs:
                i.get_capture(0, timeout=timeout)
                i.assert_nothing_captured()
def get_testcase_doc_name(test):
    """Return the first line of the docstring of *test*'s class."""
    doc = getdoc(test.__class__)
    first_line, _, _ = doc.partition('\n')
    return first_line
def get_test_description(descriptions, test):
    """Return a one-line description of *test*.

    :param descriptions: whether docstring descriptions should be used
    :param test: the test case
    :returns: shortDescription() when enabled and present,
        otherwise str(test)
    """
    short_description = test.shortDescription()
    if descriptions and short_description:
        return short_description
    else:
        # mirror unittest's fallback so the result is never None
        return str(test)
class TestCaseInfo(object):
    """Per-test-case runtime context: logger, temp dir and the VPP
    instance under test."""

    def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
        # logger dedicated to this test case
        self.logger = logger
        # temporary directory the test case runs in
        self.tempdir = tempdir
        # PID of the VPP process under test
        self.vpp_pid = vpp_pid
        # path to the VPP binary
        self.vpp_bin_path = vpp_bin_path
        # name of the test that produced a core file, if any
        self.core_crash_test = None
class VppTestResult(unittest.TestResult):
    """Test result class recording the outcomes of VPP test cases.

    @property result_string
     String variable to store the test case result string.
    @property errors
     List variable containing 2-tuples of TestCase instances and strings
     holding formatted tracebacks. Each tuple represents a test which
     raised an unexpected exception.
    @property failures
     List variable containing 2-tuples of TestCase instances and strings
     holding formatted tracebacks. Each tuple represents a test where
     a failure was explicitly signalled using the TestCase.assert*()
     methods.
    """

    # class-level registries shared by all result instances
    failed_test_cases_info = set()
    core_crash_test_cases_info = set()
    # TestCaseInfo of the currently executing test case, if any
    current_test_case_info = None
1241 def __init__(self, stream=None, descriptions=None, verbosity=None,
1244 :param stream File descriptor to store where to report test results.
1245 Set to the standard error stream by default.
1246 :param descriptions Boolean variable to store information if to use
1247 test case descriptions.
1248 :param verbosity Integer variable to store required verbosity level.
1250 super(VppTestResult, self).__init__(stream, descriptions, verbosity)
1251 self.stream = stream
1252 self.descriptions = descriptions
1253 self.verbosity = verbosity
1254 self.result_string = None
1255 self.runner = runner
    def addSuccess(self, test):
        """
        Record a test succeeded result

        :param test:
        """
        if self.current_test_case_info:
            self.current_test_case_info.logger.debug(
                "--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__,
                                                       test._testMethodName,
                                                       test._testMethodDoc))
        unittest.TestResult.addSuccess(self, test)
        self.result_string = colorize("OK", GREEN)

        self.send_result_through_pipe(test, PASS)
    def addSkip(self, test, reason):
        """
        Record a test skipped.

        :param test:
        :param reason:
        """
        if self.current_test_case_info:
            self.current_test_case_info.logger.debug(
                "--- addSkip() %s.%s(%s) called, reason is %s" %
                (test.__class__.__name__, test._testMethodName,
                 test._testMethodDoc, reason))
        unittest.TestResult.addSkip(self, test, reason)
        self.result_string = colorize("SKIP", YELLOW)

        self.send_result_through_pipe(test, SKIP)
1292 def symlink_failed(self):
1293 if self.current_test_case_info:
1295 failed_dir = os.getenv('FAILED_DIR')
1296 link_path = os.path.join(
1299 os.path.basename(self.current_test_case_info.tempdir))
1300 if self.current_test_case_info.logger:
1301 self.current_test_case_info.logger.debug(
1302 "creating a link to the failed test")
1303 self.current_test_case_info.logger.debug(
1304 "os.symlink(%s, %s)" %
1305 (self.current_test_case_info.tempdir, link_path))
1306 if os.path.exists(link_path):
1307 if self.current_test_case_info.logger:
1308 self.current_test_case_info.logger.debug(
1309 'symlink already exists')
1311 os.symlink(self.current_test_case_info.tempdir, link_path)
1313 except Exception as e:
1314 if self.current_test_case_info.logger:
1315 self.current_test_case_info.logger.error(e)
1317 def send_result_through_pipe(self, test, result):
1318 if hasattr(self, 'test_framework_result_pipe'):
1319 pipe = self.test_framework_result_pipe
1321 pipe.send((test.id(), result))
1323 def log_error(self, test, err, fn_name):
1324 if self.current_test_case_info:
1325 if isinstance(test, unittest.suite._ErrorHolder):
1326 test_name = test.description
1328 test_name = '%s.%s(%s)' % (test.__class__.__name__,
1329 test._testMethodName,
1330 test._testMethodDoc)
1331 self.current_test_case_info.logger.debug(
1332 "--- %s() %s called, err is %s" %
1333 (fn_name, test_name, err))
1334 self.current_test_case_info.logger.debug(
1335 "formatted exception is:\n%s" %
1336 "".join(format_exception(*err)))
1338 def add_error(self, test, err, unittest_fn, error_type):
1339 if error_type == FAIL:
1340 self.log_error(test, err, 'addFailure')
1341 error_type_str = colorize("FAIL", RED)
1342 elif error_type == ERROR:
1343 self.log_error(test, err, 'addError')
1344 error_type_str = colorize("ERROR", RED)
1346 raise Exception('Error type %s cannot be used to record an '
1347 'error or a failure' % error_type)
1349 unittest_fn(self, test, err)
1350 if self.current_test_case_info:
1351 self.result_string = "%s [ temp dir used by test case: %s ]" % \
1353 self.current_test_case_info.tempdir)
1354 self.symlink_failed()
1355 self.failed_test_cases_info.add(self.current_test_case_info)
1356 if is_core_present(self.current_test_case_info.tempdir):
1357 if not self.current_test_case_info.core_crash_test:
1358 if isinstance(test, unittest.suite._ErrorHolder):
1359 test_name = str(test)
1361 test_name = "'{!s}' ({!s})".format(
1362 get_testcase_doc_name(test), test.id())
1363 self.current_test_case_info.core_crash_test = test_name
1364 self.core_crash_test_cases_info.add(
1365 self.current_test_case_info)
1367 self.result_string = '%s [no temp dir]' % error_type_str
1369 self.send_result_through_pipe(test, error_type)
    def addFailure(self, test, err):
        """
        Record a test failed result

        :param test:
        :param err: error message
        """
        self.add_error(test, err, unittest.TestResult.addFailure, FAIL)
    def addError(self, test, err):
        """
        Record a test error result

        :param test:
        :param err: error message
        """
        self.add_error(test, err, unittest.TestResult.addError, ERROR)
    def getDescription(self, test):
        """
        Get test description

        :param test:
        :returns: test description
        """
        return get_test_description(self.descriptions, test)
1401 def startTest(self, test):
1409 def print_header(test):
1410 if not hasattr(test.__class__, '_header_printed'):
1411 print(double_line_delim)
1412 print(colorize(getdoc(test).splitlines()[0], GREEN))
1413 print(double_line_delim)
1414 test.__class__._header_printed = True
1417 self.start_test = time.time()
1418 unittest.TestResult.startTest(self, test)
1419 if self.verbosity > 0:
1420 self.stream.writeln(
1421 "Starting " + self.getDescription(test) + " ...")
1422 self.stream.writeln(single_line_delim)
1424 def stopTest(self, test):
1426 Called when the given test has been run
1431 unittest.TestResult.stopTest(self, test)
1433 if self.verbosity > 0:
1434 self.stream.writeln(single_line_delim)
1435 self.stream.writeln("%-73s%s" % (self.getDescription(test),
1436 self.result_string))
1437 self.stream.writeln(single_line_delim)
1439 self.stream.writeln("%-68s %4.2f %s" %
1440 (self.getDescription(test),
1441 time.time() - self.start_test,
1442 self.result_string))
1444 self.send_result_through_pipe(test, TEST_RUN)
    def printErrors(self):
        """
        Print errors from running the test case
        """
        if len(self.errors) > 0 or len(self.failures) > 0:
            self.stream.writeln()
            self.printErrorList('ERROR', self.errors)
            self.printErrorList('FAIL', self.failures)

        # ^^ that is the last output from unittest before summary
        # when the summary is suppressed, silence any further unittest
        # output by swapping both streams for /dev/null
        if not self.runner.print_summary:
            devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
            self.stream = devnull
            self.runner.stream = devnull
    def printErrorList(self, flavour, errors):
        """
        Print error list to the output stream together with error type
        and test case description.

        :param flavour: error type
        :param errors: iterable errors
        """
        for test, err in errors:
            self.stream.writeln(double_line_delim)
            self.stream.writeln("%s: %s" %
                                (flavour, self.getDescription(test)))
            self.stream.writeln(single_line_delim)
            self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
    """
    A basic test runner implementation which prints results to standard error.
    """

    @property
    def resultclass(self):
        """Class maintaining the results of the tests"""
        return VppTestResult
1488 def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
1489 result_pipe=None, failfast=False, buffer=False,
1490 resultclass=None, print_summary=True, **kwargs):
1491 # ignore stream setting here, use hard-coded stdout to be in sync
1492 # with prints from VppTestCase methods ...
1493 super(VppTestRunner, self).__init__(sys.stdout, descriptions,
1494 verbosity, failfast, buffer,
1495 resultclass, **kwargs)
1496 KeepAliveReporter.pipe = keep_alive_pipe
1498 self.orig_stream = self.stream
1499 self.resultclass.test_framework_result_pipe = result_pipe
1501 self.print_summary = print_summary
1503 def _makeResult(self):
1504 return self.resultclass(self.stream,
1509 def run(self, test):
1516 faulthandler.enable() # emit stack trace to stderr if killed by signal
1518 result = super(VppTestRunner, self).run(test)
1519 if not self.print_summary:
1520 self.stream = self.orig_stream
1521 result.stream = self.orig_stream
class Worker(Thread):
    """Run an external executable on a background thread, capturing its
    stdout/stderr into the log and its return code into self.result."""

    def __init__(self, executable_args, logger, env=None, *args, **kwargs):
        super(Worker, self).__init__(*args, **kwargs)
        self.logger = logger
        self.args = executable_args
        if hasattr(self, 'testcase') and self.testcase.debug_all:
            if self.testcase.debug_gdbserver:
                # prepend gdbserver so the executable can be debugged
                # remotely (original concatenated the Thread *args tuple
                # here, which raised TypeError)
                self.args = ['/usr/bin/gdbserver', 'localhost:{port}'
                             .format(port=self.testcase.gdbserver_port)] + \
                    executable_args
            elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'):
                self.args.append(self.wait_for_gdb)
        self.app_bin = executable_args[0]
        self.app_name = os.path.basename(self.app_bin)
        if hasattr(self, 'role'):
            self.app_name += ' {role}'.format(role=self.role)
        self.process = None
        self.result = None
        env = {} if env is None else env
        self.env = copy.deepcopy(env)

    def wait_for_enter(self):
        """When debugging is enabled, print gdb attach instructions and
        block until the user presses ENTER; otherwise return immediately."""
        if not hasattr(self, 'testcase'):
            return
        if self.testcase.debug_all and self.testcase.debug_gdbserver:
            print()
            print(double_line_delim)
            print("Spawned GDB Server for '{app}' with PID: {pid}"
                  .format(app=self.app_name, pid=self.process.pid))
        elif self.testcase.debug_all and self.testcase.debug_gdb:
            print()
            print(double_line_delim)
            print("Spawned '{app}' with PID: {pid}"
                  .format(app=self.app_name, pid=self.process.pid))
        else:
            return
        print(single_line_delim)
        print("You can debug '{app}' using:".format(app=self.app_name))
        if self.testcase.debug_gdbserver:
            print("sudo gdb " + self.app_bin +
                  " -ex 'target remote localhost:{port}'"
                  .format(port=self.testcase.gdbserver_port))
            print("Now is the time to attach gdb by running the above "
                  "command, set up breakpoints etc., then resume from "
                  "within gdb by issuing the 'continue' command")
            # each debugged worker gets its own gdbserver port
            self.testcase.gdbserver_port += 1
        elif self.testcase.debug_gdb:
            print("sudo gdb " + self.app_bin +
                  " -ex 'attach {pid}'".format(pid=self.process.pid))
            print("Now is the time to attach gdb by running the above "
                  "command and set up breakpoints etc., then resume from"
                  " within gdb by issuing the 'continue' command")
        print(single_line_delim)
        input("Press ENTER to continue running the testcase...")

    def run(self):
        """Thread body: spawn the executable, wait for it to finish, log
        its output and store the return code in self.result."""
        executable = self.args[0]
        if not os.path.exists(executable) or not os.access(
                executable, os.F_OK | os.X_OK):
            # Exit code that means some system file did not exist,
            # could not be opened, or had some other kind of error.
            self.result = os.EX_OSFILE
            raise EnvironmentError(
                "executable '%s' is not found or executable." % executable)
        self.logger.debug("Running executable: '{app}'"
                          .format(app=' '.join(self.args)))
        env = os.environ.copy()
        env.update(self.env)
        env["CK_LOG_FILE_NAME"] = "-"
        self.process = subprocess.Popen(
            self.args, shell=False, env=env, preexec_fn=os.setpgrp,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.wait_for_enter()
        out, err = self.process.communicate()
        self.logger.debug("Finished running `{app}'".format(app=self.app_name))
        self.logger.info("Return code is `%s'" % self.process.returncode)
        self.logger.info(single_line_delim)
        self.logger.info("Executable `{app}' wrote to stdout:"
                         .format(app=self.app_name))
        self.logger.info(single_line_delim)
        self.logger.info(out.decode('utf-8'))
        self.logger.info(single_line_delim)
        self.logger.info("Executable `{app}' wrote to stderr:"
                         .format(app=self.app_name))
        self.logger.info(single_line_delim)
        self.logger.info(err.decode('utf-8'))
        self.logger.info(single_line_delim)
        self.result = self.process.returncode
1614 if __name__ == '__main__':