+ while wrapped_testcase_suites:
+ finished_testcase_suites = set()
+ for wrapped_testcase_suite in wrapped_testcase_suites:
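+ # drain all result messages the child has sent so far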
+ while wrapped_testcase_suite.result_parent_end.poll():
+ wrapped_testcase_suite.result.process_result(
+ *wrapped_testcase_suite.result_parent_end.recv())
+ wrapped_testcase_suite.last_heard = time.time()
+
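+ # drain keep-alive messages carrying the child's current state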
+ while wrapped_testcase_suite.keep_alive_parent_end.poll():
+ wrapped_testcase_suite.last_test, \
+ wrapped_testcase_suite.last_test_vpp_binary, \
+ wrapped_testcase_suite.last_test_temp_dir, \
+ wrapped_testcase_suite.vpp_pid = \
+ wrapped_testcase_suite.keep_alive_parent_end.recv()
+ wrapped_testcase_suite.last_heard = time.time()
+
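+ # a message on the "finished" pipe means the child completed
+ # its whole suite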
+ if wrapped_testcase_suite.finished_parent_end.poll():
+ wrapped_testcase_suite.finished_parent_end.recv()
+ wrapped_testcase_suite.last_heard = time.time()
+ stop_run = process_finished_testsuite(
+ wrapped_testcase_suite,
+ finished_testcase_suites,
+ failed_wrapped_testcases,
+ results) or stop_run
+ continue
+
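+ # no finish signal - check the child's health: overall timeout,
+ # unexpected death, or a core file left behind by VPP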
+ fail = False
+ if wrapped_testcase_suite.last_heard + test_timeout < \
+ time.time():
+ fail = True
+ wrapped_testcase_suite.logger.critical(
+ "Child test runner process timed out "
+ "(last test running was `%s' in `%s')!" %
+ (wrapped_testcase_suite.last_test,
+ wrapped_testcase_suite.last_test_temp_dir))
+ elif not wrapped_testcase_suite.child.is_alive():
+ fail = True
+ wrapped_testcase_suite.logger.critical(
+ "Child test runner process unexpectedly died "
+ "(last test running was `%s' in `%s')!" %
+ (wrapped_testcase_suite.last_test,
+ wrapped_testcase_suite.last_test_temp_dir))
+ elif wrapped_testcase_suite.last_test_temp_dir and \
+ wrapped_testcase_suite.last_test_vpp_binary:
+ if is_core_present(
+ wrapped_testcase_suite.last_test_temp_dir):
+ wrapped_testcase_suite.add_testclass_with_core()
+ if wrapped_testcase_suite.core_detected_at is None:
+ wrapped_testcase_suite.core_detected_at = \
+ time.time()
+ elif wrapped_testcase_suite.core_detected_at + \
+ core_timeout < time.time():
+ wrapped_testcase_suite.logger.critical(
+ "Child test runner process unresponsive and "
+ "core-file exists in test temporary directory "
+ "(last test running was `%s' in `%s')!" %
+ (wrapped_testcase_suite.last_test,
+ wrapped_testcase_suite.last_test_temp_dir))
+ fail = True
+
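+ # unhealthy child: terminate it (and its VPP), mark the result
+ # as crashed and record the suite as finished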
+ if fail:
+ wrapped_testcase_suite.child.terminate()
+ try:
+ # terminating the child process tends to leave an orphaned
+ # VPP process behind
+ if wrapped_testcase_suite.vpp_pid:
+ os.kill(wrapped_testcase_suite.vpp_pid,
+ signal.SIGTERM)
+ except OSError:
+ # already dead
+ pass
+ wrapped_testcase_suite.result.crashed = True
+ wrapped_testcase_suite.result.process_result(
+ wrapped_testcase_suite.last_test_id, ERROR)
+ stop_run = process_finished_testsuite(
+ wrapped_testcase_suite,
+ finished_testcase_suites,
+ failed_wrapped_testcases,
+ results) or stop_run
+
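+ # reap children whose suites finished, close their pipes and
+ # signal end-of-output with a None sentinel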
+ for finished_testcase in finished_testcase_suites:
+ # Somewhat surprisingly, the join below may time out even
+ # if the child signaled that it finished - so we log the
+ # occurrence just in case.
+ join_start = time.time()
+ finished_testcase.child.join(test_finished_join_timeout)
+ join_end = time.time()
+ if join_end - join_start >= test_finished_join_timeout:
+ finished_testcase.logger.error(
+ "Timeout joining finished test: %s (pid %d)" %
+ (finished_testcase.last_test,
+ finished_testcase.child.pid))
+ finished_testcase.close_pipes()
+ wrapped_testcase_suites.remove(finished_testcase)
+ finished_unread_testcases.add(finished_testcase)
+ finished_testcase.stdouterr_queue.put(None)
+ on_suite_finish(finished_testcase)
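+ # when stopping early, record the remaining suites as unexecuted;
+ # otherwise start the next suite (run-solo suites wait until
+ # nothing else is running)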
+ if stop_run:
+ while testcase_suites:
+ results.append(TestResult(testcase_suites.pop(0)))
+ elif testcase_suites:
+ a_suite = testcase_suites.pop(0)
+ while a_suite and a_suite.is_tagged_run_solo:
+ solo_testcase_suites.append(a_suite)
+ if testcase_suites:
+ a_suite = testcase_suites.pop(0)
+ else:
+ a_suite = None
+ if a_suite and can_run_suite(a_suite):
+ run_suite(a_suite)
+ if solo_testcase_suites and tests_running == 0:
+ a_suite = solo_testcase_suites.pop(0)
+ run_suite(a_suite)
+ time.sleep(0.1)
+ except Exception:
+ for wrapped_testcase_suite in wrapped_testcase_suites:
+ wrapped_testcase_suite.child.terminate()
+ wrapped_testcase_suite.stdouterr_queue.put(None)
+ raise
+ finally:
+ read_from_testcases.clear()
+ stdouterr_thread.join(test_timeout)
+ manager.shutdown()
+
+ handle_cores(failed_wrapped_testcases)
+ return results
+
+
+class TestSuiteWrapper(unittest.TestSuite):
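+ """unittest.TestSuite that tracks the maximum number of CPUs
+ required by any of its tests and carries the assigned CPU list."""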
+ cpus_used = 0
+
+ def __init__(self):
+ super().__init__()
+
+ def addTest(self, test):
+ self.cpus_used = max(self.cpus_used, test.get_cpus_required())
+ super().addTest(test)
+
+ def assign_cpus(self, cpus):
+ self.cpus = cpus
+
+ def _handleClassSetUp(self, test, result):
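+ # hand the assigned CPUs to the test instance before its class
+ # setup runs (classes skipped for lack of CPUs get none)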
+ if not test.__class__.skipped_due_to_cpu_lack:
+ test.assign_cpus(self.cpus)
+ super()._handleClassSetUp(test, result)
+
+ def get_assigned_cpus(self):
+ return self.cpus
+
+
+class SplitToSuitesCallback:
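+ """Sort discovered tests into one suite per (file, class) pair,
+ diverting tests rejected by filter_callback into self.filtered."""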
+ def __init__(self, filter_callback):
+ self.suites = {}
+ self.suite_name = 'default'
+ self.filter_callback = filter_callback
+ self.filtered = TestSuiteWrapper()
+
+ def __call__(self, file_name, cls, method):
+ test_method = cls(method)
+ if self.filter_callback(file_name, cls.__name__, method):
+ self.suite_name = file_name + cls.__name__
+ if self.suite_name not in self.suites:
+ self.suites[self.suite_name] = TestSuiteWrapper()
+ self.suites[self.suite_name].is_tagged_run_solo = False
+ self.suites[self.suite_name].addTest(test_method)
+ if test_method.is_tagged_run_solo():
+ self.suites[self.suite_name].is_tagged_run_solo = True
+
+ else:
+ self.filtered.addTest(test_method)
+
+
+test_option = "TEST"
+
+
+def parse_test_option():
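+ """Parse the TEST env var into (file, class, function) filters.
+
+ Accepts "file", "file.class" or "file.class.function"; a component
+ that is '*' or empty matches anything, and the "test_" file name
+ prefix may be omitted.
+ """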
+ f = os.getenv(test_option, None)
+ filter_file_name = None
+ filter_class_name = None
+ filter_func_name = None
+ if f:
+ if '.' in f:
+ parts = f.split('.')
+ if len(parts) > 3:
+ raise Exception("Unrecognized %s option: %s" %
+ (test_option, f))
+ if len(parts) > 2:
+ if parts[2] not in ('*', ''):
+ filter_func_name = parts[2]
+ if parts[1] not in ('*', ''):
+ filter_class_name = parts[1]
+ if parts[0] not in ('*', ''):
+ if parts[0].startswith('test_'):
+ filter_file_name = parts[0]
+ else:
+ filter_file_name = 'test_%s' % parts[0]
+ else:
+ if f.startswith('test_'):
+ filter_file_name = f
+ else:
+ filter_file_name = 'test_%s' % f
+ if filter_file_name:
+ filter_file_name = '%s.py' % filter_file_name
+ return filter_file_name, filter_class_name, filter_func_name
+
+
+def filter_tests(tests, filter_cb):
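+ """Recursively filter a test suite, keeping only tests accepted
+ by filter_cb."""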
+ result = TestSuiteWrapper()
+ for t in tests:
+ if isinstance(t, unittest.suite.TestSuite):
+ # this is a bunch of tests, recursively filter...
+ x = filter_tests(t, filter_cb)
+ if x.countTestCases() > 0:
+ result.addTest(x)
+ elif isinstance(t, unittest.TestCase):
+ # this is a single test
+ parts = t.id().split('.')
+ # t.id() for common cases like this:
+ # test_classifier.TestClassifier.test_acl_ip
+ # apply filtering only if it is so
+ if len(parts) == 3:
+ if not filter_cb(parts[0], parts[1], parts[2]):
+ continue
+ result.addTest(t)
+ else:
+ # unexpected object, don't touch it
+ result.addTest(t)
+ return result
+
+
+class FilterByTestOption:
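+ """Match file names via fnmatch (wildcards allowed) and class and
+ function names by exact comparison."""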
+ def __init__(self, filter_file_name, filter_class_name, filter_func_name):
+ self.filter_file_name = filter_file_name
+ self.filter_class_name = filter_class_name
+ self.filter_func_name = filter_func_name
+
+ def __call__(self, file_name, class_name, func_name):
+ if self.filter_file_name:
+ fn_match = fnmatch.fnmatch(file_name, self.filter_file_name)
+ if not fn_match:
+ return False
+ if self.filter_class_name and class_name != self.filter_class_name:
+ return False
+ if self.filter_func_name and func_name != self.filter_func_name:
+ return False
+ return True
+
+
+class FilterByClassList:
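+ """Accept only tests whose "file.class" prefix is in the given set."""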
+ def __init__(self, classes_with_filenames):
+ self.classes_with_filenames = classes_with_filenames
+
+ def __call__(self, file_name, class_name, func_name):
+ return '.'.join([file_name, class_name]) in self.classes_with_filenames
+
+
+def suite_from_failed(suite, failed):
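+ """Reduce a suite to just the test classes that had failures."""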
+ failed = {x.rsplit('.', 1)[0] for x in failed}
+ filter_cb = FilterByClassList(failed)
+ suite = filter_tests(suite, filter_cb)
+ return suite
+
+
+class AllResults(dict):
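+ """Per-result-type counters plus per-suite results, also collecting
+ the suites that need a re-run."""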
+ def __init__(self):
+ super(AllResults, self).__init__()
+ self.all_testcases = 0
+ self.results_per_suite = []
+ self[PASS] = 0
+ self[FAIL] = 0
+ self[ERROR] = 0
+ self[SKIP] = 0
+ self[SKIP_CPU_SHORTAGE] = 0
+ self[TEST_RUN] = 0
+ self.rerun = []
+ self.testsuites_no_tests_run = []
+
+ def add_results(self, result):
+ self.results_per_suite.append(result)
+ result_types = [PASS, FAIL, ERROR, SKIP, TEST_RUN, SKIP_CPU_SHORTAGE]
+ for result_type in result_types:
+ self[result_type] += len(result[result_type])
+
+ def add_result(self, result):
+ retval = 0
+ self.all_testcases += result.testcase_suite.countTestCases()
+ self.add_results(result)
+
+ if result.no_tests_run():
+ self.testsuites_no_tests_run.append(result.testcase_suite)
+ if result.crashed:
+ retval = -1
+ else:
+ retval = 1
+ elif not result.was_successful():
+ retval = 1
+
+ if retval != 0:
+ self.rerun.append(result.testcase_suite)
+
+ return retval
+
+ def print_results(self):
+ print('')
+ print(double_line_delim)
+ print('TEST RESULTS:')
+
+ def indent_results(lines):
+ lines = list(filter(None, lines))
+ # right-align the labels on their colons
+ maximum = 4 + max(l.index(":") for l in lines)
+ for l in lines:
+ padding = " " * (maximum - l.index(":"))
+ print(f"{padding}{l}")
+
+ indent_results([
+ f'Scheduled tests: {self.all_testcases}',
+ f'Executed tests: {self[TEST_RUN]}',
+ f'Passed tests: {colorize(self[PASS], GREEN)}',
+ f'Skipped tests: {colorize(self[SKIP], YELLOW)}'
+ if self[SKIP] else None,
+ f'Not Executed tests: {colorize(self.not_executed, RED)}'
+ if self.not_executed else None,
+ f'Failures: {colorize(self[FAIL], RED)}' if self[FAIL] else None,
+ f'Errors: {colorize(self[ERROR], RED)}' if self[ERROR] else None,
+ 'Tests skipped due to lack of CPUS: '
+ f'{colorize(self[SKIP_CPU_SHORTAGE], YELLOW)}'
+ if self[SKIP_CPU_SHORTAGE] else None
+ ])
+
+ if self.all_failed > 0:
+ print('FAILURES AND ERRORS IN TESTS:')
+ for result in self.results_per_suite:
+ failed_testcase_ids = result[FAIL]
+ errored_testcase_ids = result[ERROR]
+ old_testcase_name = None
+ if failed_testcase_ids:
+ for failed_test_id in failed_testcase_ids:
+ new_testcase_name, test_name = \
+ result.get_testcase_names(failed_test_id)
+ if new_testcase_name != old_testcase_name:
+ print(' Testcase name: {}'.format(
+ colorize(new_testcase_name, RED)))
+ old_testcase_name = new_testcase_name
+ print(' FAILURE: {} [{}]'.format(
+ colorize(test_name, RED), failed_test_id))
+ if errored_testcase_ids:
+ for errored_test_id in errored_testcase_ids:
+ new_testcase_name, test_name = \
+ result.get_testcase_names(errored_test_id)
+ if new_testcase_name != old_testcase_name:
+ print(' Testcase name: {}'.format(
+ colorize(new_testcase_name, RED)))
+ old_testcase_name = new_testcase_name
+ print(' ERROR: {} [{}]'.format(
+ colorize(test_name, RED), errored_test_id))
+ if self.testsuites_no_tests_run:
+ print('TESTCASES WHERE NO TESTS WERE SUCCESSFULLY EXECUTED:')
+ tc_classes = set()
+ for testsuite in self.testsuites_no_tests_run:
+ for testcase in testsuite:
+ tc_classes.add(get_testcase_doc_name(testcase))
+ for tc_class in tc_classes:
+ print(' {}'.format(colorize(tc_class, RED)))
+
+ if self[SKIP_CPU_SHORTAGE]:
+ print()
+ print(colorize(' SOME TESTS WERE SKIPPED BECAUSE THERE ARE NOT'
+ ' ENOUGH CPUS AVAILABLE', YELLOW))
+ print(double_line_delim)
+ print('')
+
+ @property
+ def not_executed(self):
+ return self.all_testcases - self[TEST_RUN]
+
+ @property
+ def all_failed(self):
+ return self[FAIL] + self[ERROR]
+
+
+def parse_results(results):
+ """
+ Prints the number of scheduled, executed, not executed, passed, failed,
+ errored and skipped tests and details about failed and errored tests.
+
+ Also returns all suites where any test failed.
+
+ :param results:
+ :return:
+ """
+
+ results_per_suite = AllResults()
+ crashed = False
+ failed = False
+ for result in results:
+ result_code = results_per_suite.add_result(result)
+ if result_code == 1:
+ failed = True
+ elif result_code == -1:
+ crashed = True
+
+ results_per_suite.print_results()
+
+ if crashed:
+ return_code = -1
+ elif failed:
+ return_code = 1
+ else:
+ return_code = 0
+ return return_code, results_per_suite.rerun
+
+
+def parse_digit_env(env_var, default):
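+ """Return the env var's value as an int if it is all digits,
+ otherwise warn and return the default."""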
+ value = os.getenv(env_var, default)
+ if value != default:
+ if value.isdigit():
+ value = int(value)
+ else:
+ print('WARNING: unsupported value "%s" for env var "%s", '
+ 'defaulting to %s' % (value, env_var, default))
+ value = default
+ return value
+
+
+if __name__ == '__main__':
+
+ verbose = parse_digit_env("V", 0)
+
+ test_timeout = parse_digit_env("TIMEOUT", 600) # default = 10 minutes
+
+ test_finished_join_timeout = 15
+
+ retries = parse_digit_env("RETRIES", 0)
+
+ debug = os.getenv("DEBUG", "n").lower() in ["gdb", "gdbserver", "attach"]
+
+ debug_core = os.getenv("DEBUG", "").lower() == "core"
+ compress_core = framework.BoolEnvironmentVariable("CORE_COMPRESS")
+
+ if os.getenv("VPP_IN_GDB", "n").lower() in ["1", "y", "yes"]:
+ start_vpp_in_gdb()
+ exit()
+
+ step = framework.BoolEnvironmentVariable("STEP")
+ force_foreground = framework.BoolEnvironmentVariable("FORCE_FOREGROUND")
+
+ run_interactive = debug or step or force_foreground
+
+ max_concurrent_tests = 0
+ print(f"OS reports {num_cpus} available cpu(s).")
+
+ test_jobs = os.getenv("TEST_JOBS", "1").lower() # default = 1 process
+ if test_jobs == 'auto':
+ if run_interactive:
+ max_concurrent_tests = 1
+ print('Interactive mode required, running tests consecutively.')
+ else:
+ max_concurrent_tests = num_cpus
+ print(f"Running at most {max_concurrent_tests} python test "
+ "processes concurrently.")
+ else:
+ try:
+ test_jobs = int(test_jobs)
+ except ValueError as e:
+ raise ValueError("Invalid TEST_JOBS value specified, valid "
+ "values are a positive integer or 'auto'") from e
+ if test_jobs <= 0:
+ raise ValueError("Invalid TEST_JOBS value specified, valid "
+ "values are a positive integer or 'auto'")
+ max_concurrent_tests = test_jobs
+ print(f"Running at most {max_concurrent_tests} python test processes "
+ "concurrently as set by 'TEST_JOBS'.")
+
+ print(f"Using at most {max_vpp_cpus} cpus for VPP threads.")
+
+ if run_interactive and max_concurrent_tests > 1:
+ raise NotImplementedError(
+ 'Running tests interactively (DEBUG is gdb[server] or ATTACH or '
+ 'STEP is set) in parallel (TEST_JOBS is more than 1) is not '
+ 'supported')
+
+ parser = argparse.ArgumentParser(description="VPP unit tests")
+ parser.add_argument("-f", "--failfast", action='store_true',
+ help="fast failure flag")
+ parser.add_argument("-d", "--dir", action='append', type=str,
+ help="directory containing test files "
+ "(may be specified multiple times)")
+ args = parser.parse_args()
+ failfast = args.failfast
+ descriptions = True
+
+ print("Running tests using custom test runner.")
+ filter_file, filter_class, filter_func = parse_test_option()
+
+ print("Active filters: file=%s, class=%s, function=%s" % (
+ filter_file, filter_class, filter_func))
+
+ filter_cb = FilterByTestOption(filter_file, filter_class, filter_func)
+
+ ignore_path = os.getenv("VENV_PATH", None)
+ cb = SplitToSuitesCallback(filter_cb)
+ for d in args.dir:
+ print("Adding tests from directory tree %s" % d)
+ discover_tests(d, cb, ignore_path)
+
+ # suites are not hashable, need to use list
+ suites = []
+ tests_amount = 0
+ for testcase_suite in cb.suites.values():
+ tests_amount += testcase_suite.countTestCases()
+ if testcase_suite.cpus_used > max_vpp_cpus:
+ # Replace the test methods with lambdas that just skip the test,
+ # and neuter setUp/tearDown so each test can still be "started"
+ # and "stopped" - the description/SKIP prints happen in stopTest(),
+ # which only triggers if the test method actually runs.
+ for t in testcase_suite:
+ for m in dir(t):
+ if m.startswith('test_'):
+ # bind t as a default argument to avoid the
+ # late-binding closure pitfall across loop iterations
+ setattr(t, m,
+ lambda t=t: t.skipTest("not enough cpus"))
+ setattr(t.__class__, 'setUpClass', lambda: None)
+ setattr(t.__class__, 'tearDownClass', lambda: None)
+ setattr(t, 'setUp', lambda: None)
+ setattr(t, 'tearDown', lambda: None)
+ t.__class__.skipped_due_to_cpu_lack = True
+ suites.append(testcase_suite)
+
+ print("%s out of %s tests match specified filters" % (
+ tests_amount, tests_amount + cb.filtered.countTestCases()))
+
+ if not running_extended_tests:
+ print("Not running extended tests (some tests will be skipped)")
+
+ attempts = retries + 1
+ if attempts > 1:
+ print("Perform %s attempts to pass the suite..." % attempts)
+
+ if run_interactive and suites:
+ # don't fork if requiring interactive terminal
+ print('Running tests in foreground in the current process')
+ full_suite = unittest.TestSuite()
+ free_cpus = list(available_cpus)
+ cpu_shortage = False
+ for suite in suites:
+ if suite.cpus_used <= max_vpp_cpus:
+ suite.assign_cpus(free_cpus[:suite.cpus_used])
+ else:
+ suite.assign_cpus([])
+ cpu_shortage = True
+ full_suite.addTests(suites)
+ result = VppTestRunner(verbosity=verbose,
+ failfast=failfast,
+ print_summary=True).run(full_suite)
+ was_successful = result.wasSuccessful()
+ if not was_successful:
+ for test_case_info in result.failed_test_cases_info:
+ handle_failed_suite(test_case_info.logger,
+ test_case_info.tempdir,
+ test_case_info.vpp_pid)
+ if test_case_info in result.core_crash_test_cases_info:
+ check_and_handle_core(test_case_info.vpp_bin_path,
+ test_case_info.tempdir,
+ test_case_info.core_crash_test)
+
+ if cpu_shortage:
+ print()
+ print(colorize('SOME TESTS WERE SKIPPED BECAUSE THERE ARE NOT'
+ ' ENOUGH CPUS AVAILABLE', YELLOW))
+ print()
+ sys.exit(not was_successful)
+ else:
+ print('Running each VPPTestCase in a separate background process'
+ f' with at most {max_concurrent_tests} parallel python test '
+ 'process(es)')
+ exit_code = 0
+ while suites and attempts > 0:
+ results = run_forked(suites)
+ exit_code, suites = parse_results(results)
+ attempts -= 1
+ if exit_code == 0:
+ print('Test run was successful')
+ else:
+ print('%s attempt(s) left.' % attempts)
+ sys.exit(exit_code)