+ if f.startswith("test_"):
+ filter_file_name = f
+ else:
+ filter_file_name = "test_%s" % f
+ if filter_file_name:
+ filter_file_name = "%s.py" % filter_file_name
+ return filter_file_name, filter_class_name, filter_func_name
+
+
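+# Recursively walk a test suite and keep only the tests accepted by filter_cb.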
+def filter_tests(tests, filter_cb):
+ result = TestSuiteWrapper()
+ for t in tests:
+ if isinstance(t, unittest.suite.TestSuite):
+ # this is a bunch of tests, recursively filter...
+ x = filter_tests(t, filter_cb)
+ if x.countTestCases() > 0:
+ result.addTest(x)
+ elif isinstance(t, unittest.TestCase):
+ # this is a single test
+ parts = t.id().split(".")
+            # t.id() is typically of the form
+            # test_classifier.TestClassifier.test_acl_ip
+            # apply filtering only when the id has that three-part form
+ if len(parts) == 3:
+ if not filter_cb(parts[0], parts[1], parts[2]):
+ continue
+ result.addTest(t)
+ else:
+ # unexpected object, don't touch it
+ result.addTest(t)
+ return result
+
+
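+# Test filter callback matching a test's (file, class, function) names against
+# the parsed filter triples. File names are matched with fnmatch (glob-style),
+# class and function names by exact comparison; any matching triple keeps the test.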
+class FilterByTestOption:
+ def __init__(self, filters):
+ self.filters = filters
+
+ def __call__(self, file_name, class_name, func_name):
+ def test_one(
+ filter_file_name,
+ filter_class_name,
+ filter_func_name,
+ file_name,
+ class_name,
+ func_name,
+ ):
+ if filter_file_name:
+ fn_match = fnmatch.fnmatch(file_name, filter_file_name)
+ if not fn_match:
+ return False
+ if filter_class_name and class_name != filter_class_name:
+ return False
+ if filter_func_name and func_name != filter_func_name:
+ return False
+ return True
+
+ for filter_file_name, filter_class_name, filter_func_name in self.filters:
+ if test_one(
+ filter_file_name,
+ filter_class_name,
+ filter_func_name,
+ file_name,
+ class_name,
+ func_name,
+ ):
+ return True
+
+ return False
+
+
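+# Test filter callback keeping only tests whose "<file>.<class>" id is in the given set.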
+class FilterByClassList:
+ def __init__(self, classes_with_filenames):
+ self.classes_with_filenames = classes_with_filenames
+
+ def __call__(self, file_name, class_name, func_name):
+ return ".".join([file_name, class_name]) in self.classes_with_filenames
+
+
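+# Build a suite containing only the test classes that had failures; failed ids
+# of the form "<file>.<class>.<function>" are trimmed to "<file>.<class>".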
+def suite_from_failed(suite, failed):
+ failed = {x.rsplit(".", 1)[0] for x in failed}
+ filter_cb = FilterByClassList(failed)
+ suite = filter_tests(suite, filter_cb)
+ return suite
+
+
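+# Aggregate of results across all test suites, keyed by TestResultCode,
+# also tracking the suites that need to be re-run.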
+class AllResults(dict):
+ def __init__(self):
+        super().__init__()
+ self.all_testcases = 0
+ self.results_per_suite = []
+ for trc in list(TestResultCode):
+ self[trc] = 0
+ self.rerun = []
+ self.testsuites_no_tests_run = []
+
+ def add_results(self, result):
+ self.results_per_suite.append(result)
+ for trc in list(TestResultCode):
+ self[trc] += len(result[trc])
+
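+    # Record a single suite result; return 0 on success, 1 on failure,
+    # -1 if no tests ran and the suite crashed.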
+ def add_result(self, result):
+ retval = 0
+ self.all_testcases += result.testcase_suite.countTestCases()
+ self.add_results(result)
+
+ if result.no_tests_run():
+ self.testsuites_no_tests_run.append(result.testcase_suite)
+ if result.crashed:
+ retval = -1
+ else:
+ retval = 1
+ elif not result.was_successful():
+ retval = 1
+
+ if retval != 0:
+ self.rerun.append(result.testcase_suite)
+
+ return retval
+
+ def print_results(self):
+ print("")
+ print(double_line_delim)
+ print("TEST RESULTS:")
+
+        def indent_results(lines):
+            lines = list(filter(None, lines))
+            longest = max(lines, key=lambda x: x.index(":"))
+            offset = 4 + longest.index(":")
+            for line in lines:
+                padding = " " * (offset - line.index(":"))
+                print(f"{padding}{line}")
+
+ indent_results(
+ [
+ f"Scheduled tests: {self.all_testcases}",
+ f"Executed tests: {self[TestResultCode.TEST_RUN]}",
+ f"Passed tests: {colorize(self[TestResultCode.PASS], GREEN)}",
+ f"Expected failures: {colorize(self[TestResultCode.EXPECTED_FAIL], GREEN)}"
+ if self[TestResultCode.EXPECTED_FAIL]
+ else None,
+ f"Skipped tests: {colorize(self[TestResultCode.SKIP], YELLOW)}"
+ if self[TestResultCode.SKIP]
+ else None,
+                f"Not executed tests: {colorize(self.not_executed, RED)}"
+ if self.not_executed
+ else None,
+ f"Failures: {colorize(self[TestResultCode.FAIL], RED)}"
+ if self[TestResultCode.FAIL]
+ else None,
+ f"Unexpected passes: {colorize(self[TestResultCode.UNEXPECTED_PASS], RED)}"
+ if self[TestResultCode.UNEXPECTED_PASS]
+ else None,
+ f"Errors: {colorize(self[TestResultCode.ERROR], RED)}"
+ if self[TestResultCode.ERROR]
+ else None,
+                "Tests skipped due to lack of CPUs: "
+ f"{colorize(self[TestResultCode.SKIP_CPU_SHORTAGE], YELLOW)}"
+ if self[TestResultCode.SKIP_CPU_SHORTAGE]
+ else None,
+ ]
+ )
+
+ if self.all_failed > 0:
+ print("FAILURES AND ERRORS IN TESTS:")
+ for result in self.results_per_suite:
+ old_testcase_name = None
+ for tr_code, headline in (
+ (TestResultCode.FAIL, "FAILURE"),
+ (TestResultCode.ERROR, "ERROR"),
+ (TestResultCode.UNEXPECTED_PASS, "UNEXPECTED PASS"),
+ ):
+ if not result[tr_code]:
+ continue
+
+ for failed_test_id in result[tr_code]:
+ new_testcase_name, test_name = result.get_testcase_names(
+ failed_test_id
+ )
+ if new_testcase_name != old_testcase_name:
+ print(
+ f" Testcase name: {colorize(new_testcase_name, RED)}"
+ )
+ old_testcase_name = new_testcase_name
+ print(
+ f" {headline}: {colorize(test_name, RED)} [{failed_test_id}]"
+ )
+
+ if self.testsuites_no_tests_run:
+ print("TESTCASES WHERE NO TESTS WERE SUCCESSFULLY EXECUTED:")
+ tc_classes = set()
+ for testsuite in self.testsuites_no_tests_run:
+ for testcase in testsuite:
+ tc_classes.add(get_testcase_doc_name(testcase))
+ for tc_class in tc_classes:
+ print(" {}".format(colorize(tc_class, RED)))
+
+ if self[TestResultCode.SKIP_CPU_SHORTAGE]:
+ print()
+ print(
+ colorize(
+ " SOME TESTS WERE SKIPPED BECAUSE THERE ARE NOT"
+ " ENOUGH CPUS AVAILABLE",
+ YELLOW,
+ )
+ )
+ print(double_line_delim)
+ print("")
+
+ @property
+ def not_executed(self):
+ return self.all_testcases - self[TestResultCode.TEST_RUN]
+
+ @property
+ def all_failed(self):
+ return (
+ self[TestResultCode.FAIL]
+ + self[TestResultCode.ERROR]
+ + self[TestResultCode.UNEXPECTED_PASS]
+ )
+
+
+def parse_results(results):
+ """
+ Prints the number of scheduled, executed, not executed, passed, failed,
+ errored and skipped tests and details about failed and errored tests.
+
+ Also returns all suites where any test failed.
+
+    :param results: iterable of per-suite test results
+    :return: tuple of (return code, list of testcase suites to re-run)
+ """
+
+ results_per_suite = AllResults()
+ crashed = False
+ failed = False
+ for result in results:
+ result_code = results_per_suite.add_result(result)
+ if result_code == 1:
+ failed = True
+ elif result_code == -1:
+ crashed = True
+
+ results_per_suite.print_results()
+
+ if crashed:
+ return_code = -1
+ elif failed:
+ return_code = 1
+ else:
+ return_code = 0
+ return return_code, results_per_suite.rerun
+
+
+if __name__ == "__main__":
+ print(f"Config is: {config}")
+
+ if config.api_preload:
+ VPPApiJSONFiles.load_api(apidir=config.extern_apidir + [config.vpp_install_dir])
+
+ if config.sanity:
+ print("Running sanity test case.")
+ try:
+ rc = sanity_run_vpp.main()
+ if rc != 0:
+ sys.exit(rc)
+ except Exception as e:
+ print(traceback.format_exc())
+ print("Couldn't run sanity test case.")
+ sys.exit(-1)
+
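+    # timeout for joining a test child process after it finishes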
+ test_finished_join_timeout = 15
+
+ debug_gdb = config.debug in ["gdb", "gdbserver", "attach"]
+ debug_core = config.debug == "core"
+
+ run_interactive = debug_gdb or config.step or config.force_foreground
+
+ max_concurrent_tests = 0
+ print(f"OS reports {num_cpus} available cpu(s).")
+
+ test_jobs = config.jobs
+ if test_jobs == "auto":
+ if run_interactive:
+ max_concurrent_tests = 1
+ print("Interactive mode required, running tests consecutively.")
+ else:
+ max_concurrent_tests = num_cpus
+ print(
+ f"Running at most {max_concurrent_tests} python test "
+ "processes concurrently."
+ )
+ else:
+ max_concurrent_tests = test_jobs
+ print(
+ f"Running at most {max_concurrent_tests} python test processes "
+ "concurrently as set by 'TEST_JOBS'."
+ )
+
+ print(f"Using at most {max_vpp_cpus} cpus for VPP threads.")
+
+ if run_interactive and max_concurrent_tests > 1:
+ raise NotImplementedError(
+ "Running tests interactively (DEBUG is gdb[server] or ATTACH or "
+ "STEP is set) in parallel (TEST_JOBS is more than 1) is not "
+ "supported"
+ )
+
+ descriptions = True
+
+ print("Running tests using custom test runner.")
+    filters = [parse_test_filter(f) for f in config.filter.split(",")]
+
+ print(
+ "Selected filters: ",
+ "|".join(
+ f"file={filter_file}, class={filter_class}, function={filter_func}"
+ for filter_file, filter_class, filter_func in filters
+ ),
+ )
+
+ filter_cb = FilterByTestOption(filters)
+
+ cb = SplitToSuitesCallback(filter_cb)
+ for d in config.test_src_dir:
+ print("Adding tests from directory tree %s" % d)
+ discover_tests(d, cb)
+
+ # suites are not hashable, need to use list
+ suites = []
+ tests_amount = 0
+ for testcase_suite in cb.suites.values():
+ tests_amount += testcase_suite.countTestCases()
+ if testcase_suite.cpus_used > max_vpp_cpus:
+            # replace the test functions with lambdas that just skip the test,
+            # and replace setUp/tearDown with no-ops, so the test can still be
+            # "started" and "stopped"; that preserves the prints (test
+            # description - SKIP) done in stopTest(), which only triggers if
+            # the test function actually runs
+ for t in testcase_suite:
+ for m in dir(t):
+ if m.startswith("test_"):
+                        setattr(t, m, lambda t=t: t.skipTest("not enough cpus"))
+ setattr(t.__class__, "setUpClass", lambda: None)
+ setattr(t.__class__, "tearDownClass", lambda: None)
+ setattr(t, "setUp", lambda: None)
+ setattr(t, "tearDown", lambda: None)
+ t.__class__.skipped_due_to_cpu_lack = True
+ suites.append(testcase_suite)
+
+ print(
+ "%s out of %s tests match specified filters"
+ % (tests_amount, tests_amount + cb.filtered.countTestCases())
+ )
+
+ if not config.extended:
+ print("Not running extended tests (some tests will be skipped)")
+
+ attempts = config.retries + 1
+ if attempts > 1:
+        print("Performing up to %s attempts to pass the suite..." % attempts)
+
+ if run_interactive and suites:
+        # don't fork if an interactive terminal is required
+ print("Running tests in foreground in the current process")
+ full_suite = unittest.TestSuite()
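+        # hand out cpus from the available pool; suites needing more than
+        # max_vpp_cpus get no cpus and are flagged as a cpu shortage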
+ free_cpus = list(available_cpus)
+ cpu_shortage = False
+ for suite in suites:
+ if suite.cpus_used <= max_vpp_cpus:
+ suite.assign_cpus(free_cpus[: suite.cpus_used])
+ else:
+ suite.assign_cpus([])
+ cpu_shortage = True
+ full_suite.addTests(suites)
+ result = VppTestRunner(
+ verbosity=config.verbose, failfast=config.failfast, print_summary=True
+ ).run(full_suite)
+ was_successful = result.wasSuccessful()
+ if not was_successful:
+ for test_case_info in result.failed_test_cases_info:
+ handle_failed_suite(
+ test_case_info.logger,
+ test_case_info.tempdir,
+ test_case_info.vpp_pid,
+ config.vpp,
+ )
+ if test_case_info in result.core_crash_test_cases_info:
+ check_and_handle_core(
+ test_case_info.vpp_bin_path,
+ test_case_info.tempdir,
+ test_case_info.core_crash_test,
+ )
+
+ if cpu_shortage:
+ print()
+ print(
+ colorize(
+ "SOME TESTS WERE SKIPPED BECAUSE THERE ARE NOT"
+ " ENOUGH CPUS AVAILABLE",
+ YELLOW,
+ )
+ )
+ print()
+ sys.exit(not was_successful)
+ else:
+ print(
+ "Running each VPPTestCase in a separate background process"
+ f" with at most {max_concurrent_tests} parallel python test "
+ "process(es)"
+ )
+ exit_code = 0
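+        # keep re-running the failed suites until they pass or attempts run out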
+ while suites and attempts > 0:
+ for suite in suites:
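+                # clear any stale "failed" symlink left over from a previous attempt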
+                failed_link = get_failed_testcase_linkname(
+                    config.failed_dir,
+                    get_testcase_dirname(suite._tests[0].__class__.__name__),
+                )
+ if os.path.islink(failed_link):
+ os.unlink(failed_link)
+ results = run_forked(suites)
+ exit_code, suites = parse_results(results)
+ attempts -= 1
+ if exit_code == 0:
+ print("Test run was successful")
+ else:
+ print("%s attempt(s) left." % attempts)
+ sys.exit(exit_code)