+                    if wrapped_testcase_suite.core_detected_at is None:
+                        wrapped_testcase_suite.core_detected_at = time.time()
+                    elif wrapped_testcase_suite.core_detected_at + \
+                            core_timeout < time.time():
+                        if not os.path.isfile(
+                                "%s/_core_handled" %
+                                wrapped_testcase_suite.last_test_temp_dir):
+                            wrapped_testcase_suite.logger.critical(
+                                "Child python process unresponsive and core-"
+                                "file exists in test temporary directory!")
+                            fail = True
+
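+            # the child is considered failed - collect whatever artifacts
+            # remain (temp dir link, API trace, core file) before teardown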
+            if fail:
+                failed_dir = os.getenv('VPP_TEST_FAILED_DIR')
+                if wrapped_testcase_suite.last_test_temp_dir:
+                    lttd = os.path.basename(
+                        wrapped_testcase_suite.last_test_temp_dir)
+                    link_path = '%s%s-FAILED' % (failed_dir, lttd)
+                    wrapped_testcase_suite.logger.error(
+                        "Creating a link to the failed test: %s -> %s" %
+                        (link_path, lttd))
+                    if not os.path.exists(link_path):
+                        os.symlink(
+                            wrapped_testcase_suite.last_test_temp_dir,
+                            link_path)
+                api_post_mortem_path = "/tmp/api_post_mortem.%d" % \
+                    wrapped_testcase_suite.vpp_pid
+                if os.path.isfile(api_post_mortem_path):
+                    wrapped_testcase_suite.logger.error(
+                        "Copying api_post_mortem.%d to %s" %
+                        (wrapped_testcase_suite.vpp_pid,
+                         wrapped_testcase_suite.last_test_temp_dir))
+                    shutil.copy2(api_post_mortem_path,
+                                 wrapped_testcase_suite.last_test_temp_dir)
+                if wrapped_testcase_suite.last_test_temp_dir and \
+                        wrapped_testcase_suite.last_test_vpp_binary:
+                    core_path = "%s/core" % \
+                        wrapped_testcase_suite.last_test_temp_dir
+                    if os.path.isfile(core_path):
+                        wrapped_testcase_suite.logger.error(
+                            "Core-file exists in test temporary "
+                            "directory: %s!" % core_path)
+                        check_core_path(wrapped_testcase_suite.logger,
+                                        core_path)
+                        wrapped_testcase_suite.logger.debug(
+                            "Running `file %s':" % core_path)
+                        try:
+                            info = check_output(["file", core_path])
+                            wrapped_testcase_suite.logger.debug(info)
+                        except CalledProcessError as e:
+                            wrapped_testcase_suite.logger.error(
+                                "Could not run `file' utility on core-file, "
+                                "rc=%s" % e.returncode)
+                        if debug_core:
+                            spawn_gdb(
+                                wrapped_testcase_suite.last_test_vpp_binary,
+                                core_path, wrapped_testcase_suite.logger)
+                wrapped_testcase_suite.child.terminate()
+                try:
+                    # terminating the child process tends to leave an
+                    # orphan VPP process around
+                    os.kill(wrapped_testcase_suite.vpp_pid, signal.SIGTERM)
+                except OSError:
+                    # already dead
+                    pass
+                results.append((wrapped_testcase_suite.testcase_suite,
+                                wrapped_testcase_suite.partial_result))
+                finished_testcase_suites.add(wrapped_testcase_suite)
+
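+        # reap each finished child and backfill the freed slot with the
+        # next queued suite, if any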
+        for finished_testcase in finished_testcase_suites:
+            finished_testcase.child.join()
+            finished_testcase.close_pipes()
+            wrapped_testcase_suites.remove(finished_testcase)
+            finished_unread_testcases.add(finished_testcase)
+            finished_testcase.stdouterr_queue.put(None)
+            if len(testcase_suites) > 0:
+                new_testcase = TestCaseWrapper(testcase_suites.pop(0),
+                                               manager)
+                wrapped_testcase_suites.add(new_testcase)
+                unread_testcases.add(new_testcase)
+
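+    # no suites left - stop the stdout/stderr reader thread and the
+    # queue manager before returning the collected results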
+    read_from_testcases.clear()
+    stdouterr_thread.join(test_timeout)
+    manager.shutdown()
+    return results
+
+
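+# Discovery callback: buckets each test method into a suite keyed by
+# "file name + class name"; tests rejected by filter_callback end up in
+# self.filtered instead.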
+class SplitToSuitesCallback:
+    def __init__(self, filter_callback):
+        self.suites = {}
+        self.suite_name = 'default'
+        self.filter_callback = filter_callback
+        self.filtered = unittest.TestSuite()
+
+    def __call__(self, file_name, cls, method):
+        test_method = cls(method)
+        if self.filter_callback(file_name, cls.__name__, method):
+            self.suite_name = file_name + cls.__name__
+            if self.suite_name not in self.suites:
+                self.suites[self.suite_name] = unittest.TestSuite()
+            self.suites[self.suite_name].addTest(test_method)
+        else:
+            self.filtered.addTest(test_method)
+
+
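+# name of the environment variable used to select which tests to run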
+test_option = "TEST"
+
+
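+# The TEST variable holds "file.class.function"; each part may be "*" or
+# empty to match anything, and the "test_" prefix is implied for the file
+# part (e.g. TEST=ip4 and TEST=test_ip4 are equivalent; example names are
+# illustrative only).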
+def parse_test_option():
+    f = os.getenv(test_option, None)
+    filter_file_name = None
+    filter_class_name = None
+    filter_func_name = None
+    if f:
+        if '.' in f:
+            parts = f.split('.')
+            if len(parts) > 3:
+                raise Exception("Unrecognized %s option: %s" %
+                                (test_option, f))
+            if len(parts) > 2:
+                if parts[2] not in ('*', ''):
+                    filter_func_name = parts[2]
+            if parts[1] not in ('*', ''):
+                filter_class_name = parts[1]
+            if parts[0] not in ('*', ''):
+                if parts[0].startswith('test_'):
+                    filter_file_name = parts[0]
+                else:
+                    filter_file_name = 'test_%s' % parts[0]
+        else:
+            if f.startswith('test_'):
+                filter_file_name = f
+            else:
+                filter_file_name = 'test_%s' % f
+    if filter_file_name:
+        filter_file_name = '%s.py' % filter_file_name
+    return filter_file_name, filter_class_name, filter_func_name
+
+
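+# Recursively walk a TestSuite and keep only the tests accepted by
+# filter_cb; objects that are not plain "file.class.method" tests are
+# kept as-is.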
+def filter_tests(tests, filter_cb):
+    result = unittest.suite.TestSuite()
+    for t in tests:
+        if isinstance(t, unittest.suite.TestSuite):
+            # this is a bunch of tests, recursively filter...
+            x = filter_tests(t, filter_cb)
+            if x.countTestCases() > 0:
+                result.addTest(x)
+        elif isinstance(t, unittest.TestCase):
+            # this is a single test
+            parts = t.id().split('.')
+            # t.id() for common cases like this:
+            # test_classifier.TestClassifier.test_acl_ip
+            # apply filtering only if it is so
+            if len(parts) == 3:
+                if not filter_cb(parts[0], parts[1], parts[2]):
+                    continue
+            result.addTest(t)
+        else:
+            # unexpected object, don't touch it
+            result.addTest(t)
+    return result
+
+
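+# Predicate built from the parsed TEST option; a field left as None acts
+# as a wildcard.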
+class FilterByTestOption:
+    def __init__(self, filter_file_name, filter_class_name, filter_func_name):
+        self.filter_file_name = filter_file_name
+        self.filter_class_name = filter_class_name
+        self.filter_func_name = filter_func_name
+
+    def __call__(self, file_name, class_name, func_name):
+        if self.filter_file_name and file_name != self.filter_file_name:
+            return False
+        if self.filter_class_name and class_name != self.filter_class_name:
+            return False
+        if self.filter_func_name and func_name != self.filter_func_name:
+            return False
+        return True
+
+
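+# Predicate accepting only tests whose "file.class" key appears in the
+# given collection.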
+class FilterByClassList:
+    def __init__(self, classes_with_filenames):
+        self.classes_with_filenames = classes_with_filenames
+
+    def __call__(self, file_name, class_name, func_name):
+        return '.'.join([file_name, class_name]) in \
+            self.classes_with_filenames
+
+
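+# The failed entries are "file.class.method" ids; trim them to their
+# "file.class" prefix and rebuild a suite containing all tests of the
+# classes that had a failure.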
+def suite_from_failed(suite, failed):
+    failed = {x.rsplit('.', 1)[0] for x in failed}
+    filter_cb = FilterByClassList(failed)
+    suite = filter_tests(suite, filter_cb)
+    return suite
+
+
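+# Aggregate of non-passing results: the dict part maps a result category
+# id to its overall count, while results_per_suite groups the individual
+# non-passing tests by testcase class.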
+class NonPassedResults(dict):
+    def __init__(self):
+        super(NonPassedResults, self).__init__()
+        self.all_testcases = 0
+        self.results_per_suite = {}
+        self.failures_id = 'failures'
+        self.errors_id = 'errors'
+        self.crashes_id = 'crashes'
+        self.skipped_id = 'skipped'
+        self.expectedFailures_id = 'expectedFailures'
+        self.unexpectedSuccesses_id = 'unexpectedSuccesses'
+        self.rerun = []
+        self.passed = 0
+        self[self.failures_id] = 0
+        self[self.errors_id] = 0
+        self[self.skipped_id] = 0
+        self[self.expectedFailures_id] = 0
+        self[self.unexpectedSuccesses_id] = 0
+
+    def _add_result(self, test, result_id):
+        if isinstance(test, VppTestCase):
+            parts = test.id().split('.')
+            if len(parts) == 3:
+                tc_class = get_testcase_doc_name(test)
+                if tc_class not in self.results_per_suite:
+                    # failed, errored, skipped, expectedly failed,
+                    # unexpectedly passed
+                    self.results_per_suite[tc_class] = \
+                        {self.failures_id: [],
+                         self.errors_id: [],
+                         self.skipped_id: [],
+                         self.expectedFailures_id: [],
+                         self.unexpectedSuccesses_id: []}
+                self.results_per_suite[tc_class][result_id].append(test)
+                return True
+        return False
+
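+    # book every non-passing test from `testcases' under the given
+    # category id, bumping the overall counter for each one recorded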
+    def add_results(self, testcases, testcase_result_id):
+        for failed_testcase, _ in testcases:
+            if self._add_result(failed_testcase, testcase_result_id):
+                self[testcase_result_id] += 1
+
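+    # returns 0 if all tests passed or were skipped, 1 if some test did
+    # not pass and -1 if the child died without delivering a result;
+    # non-passing suites are queued for a rerun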
+    def add_result(self, testcase_suite, result):
+        retval = 0
+        if result:
+            self.all_testcases += result.testsRun
+            self.passed += len(result.passed)
+            if len(result.passed) + len(result.skipped) \
+                    != testcase_suite.countTestCases():
+                retval = 1
+
+            self.add_results(result.failures, self.failures_id)
+            self.add_results(result.errors, self.errors_id)
+            self.add_results(result.skipped, self.skipped_id)
+            self.add_results(result.expectedFailures,
+                             self.expectedFailures_id)
+            self.add_results(result.unexpectedSuccesses,
+                             self.unexpectedSuccesses_id)
+        else:
+            retval = -1
+
+        if retval != 0:
+            if concurrent_tests == 1:
+                if result:
+                    rerun_ids = set()
+                    skipped = [x.id() for (x, _) in result.skipped]
+                    for testcase in testcase_suite:
+                        tc_id = testcase.id()
+                        if tc_id not in result.passed and \
+                                tc_id not in skipped:
+                            rerun_ids.add(tc_id)
+                    if rerun_ids:
+                        self.rerun.append(
+                            suite_from_failed(testcase_suite, rerun_ids))
+                else:
+                    self.rerun.append(testcase_suite)
+            else:
+                self.rerun.append(testcase_suite)
+
+        return retval
+
+    def print_results(self):
+        print('')
+        print(double_line_delim)
+        print('TEST RESULTS:')
+        print(' Executed tests: {}'.format(self.all_testcases))
+        print(' Passed tests: {}'.format(
+            colorize(str(self.passed), GREEN)))
+        if self[self.failures_id] > 0:
+            print(' Failures: {}'.format(
+                colorize(str(self[self.failures_id]), RED)))
+        if self[self.errors_id] > 0:
+            print(' Errors: {}'.format(
+                colorize(str(self[self.errors_id]), RED)))
+        if self[self.skipped_id] > 0:
+            print(' Skipped tests: {}'.format(
+                colorize(str(self[self.skipped_id]), YELLOW)))
+        if self[self.expectedFailures_id] > 0:
+            print(' Expected failures: {}'.format(
+                colorize(str(self[self.expectedFailures_id]), GREEN)))
+        if self[self.unexpectedSuccesses_id] > 0:
+            print(' Unexpected successes: {}'.format(
+                colorize(str(self[self.unexpectedSuccesses_id]), YELLOW)))
+
+        if self.all_failed > 0:
+            print('FAILED TESTS:')
+            for testcase_class, suite_results in \
+                    self.results_per_suite.items():
+                failed_testcases = suite_results[self.failures_id]
+                errored_testcases = suite_results[self.errors_id]
+                if len(failed_testcases) or len(errored_testcases):
+                    print(' Testcase name: {}'.format(
+                        colorize(testcase_class, RED)))
+                    for failed_test in failed_testcases:
+                        print(' FAILED: {}'.format(
+                            colorize(get_test_description(
+                                descriptions, failed_test), RED)))
+                    for errored_test in errored_testcases:
+                        print(' ERRORED: {}'.format(
+                            colorize(get_test_description(
+                                descriptions, errored_test), RED)))
+
+        print(double_line_delim)
+        print('')
+
+    @property
+    def all_failed(self):
+        return self[self.failures_id] + self[self.errors_id]
+
+
+def parse_results(results):
+    """
+    Prints the number of executed, passed, failed, errored, skipped,
+    expectedly failed and unexpectedly passed tests and details about
+    failed, errored, expectedly failed and unexpectedly passed tests.
+
+    Also returns any suites where any test failed.
+
+    :param results: list of (testcase_suite, result) pairs; result may be
+        None when the child process died without delivering one
+    :return: tuple of (return code, list of suites to rerun); the return
+        code is -1 if any child crashed, 1 if any test failed, 0 otherwise
+    """
+
+    results_per_suite = NonPassedResults()
+    crashed = False
+    failed = False
+    for testcase_suite, result in results:
+        result_code = results_per_suite.add_result(testcase_suite, result)
+        if result_code == 1:
+            failed = True
+        elif result_code == -1:
+            crashed = True
+
+    results_per_suite.print_results()
+
+    if crashed:
+        return_code = -1
+    elif failed:
+        return_code = 1
+    else:
+        return_code = 0
+    return return_code, results_per_suite.rerun
+
+
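+# read a numeric environment variable; non-digit values fall back to the
+# default with a warning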
+def parse_digit_env(env_var, default):
+    value = os.getenv(env_var, default)
+    if value != default:
+        if value.isdigit():
+            value = int(value)
+        else:
+            print('WARNING: unsupported value "%s" for env var "%s", '
+                  'defaulting to %s' % (value, env_var, default))
+            value = default
+    return value