X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=test%2Frun_tests.py;h=499d6df129074df838d5ace76c454da49cf928dc;hb=282872127;hp=0af57ac46b800e1926cad079477fd30108d35647;hpb=2eca70db953c21d2cb797ad7a172e9b1c0ccd299;p=vpp.git

diff --git a/test/run_tests.py b/test/run_tests.py
index 0af57ac46b8..499d6df1290 100644
--- a/test/run_tests.py
+++ b/test/run_tests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 import sys
 import shutil
@@ -11,9 +11,11 @@ import threading
 import signal
 import psutil
 import re
+import multiprocessing
 from multiprocessing import Process, Pipe, cpu_count
 from multiprocessing.queues import Queue
 from multiprocessing.managers import BaseManager
+import framework
 from framework import VppTestRunner, running_extended_tests, VppTestCase, \
     get_testcase_doc_name, get_test_description, PASS, FAIL, ERROR, SKIP, \
     TEST_RUN
@@ -83,7 +85,7 @@ class TestResult(dict):
             tc_id = testcase.id()
             if tc_id not in self[PASS] and tc_id not in self[SKIP]:
                 rerun_ids.add(tc_id)
-        if len(rerun_ids) > 0:
+        if rerun_ids:
             return suite_from_failed(self.testcase_suite, rerun_ids)
 
     def get_testcase_names(self, test_id):
@@ -218,11 +220,11 @@ class TestCaseWrapper(object):
 def stdouterr_reader_wrapper(unread_testcases, finished_unread_testcases,
                              read_testcases):
     read_testcase = None
-    while read_testcases.is_set() or len(unread_testcases):
-        if len(finished_unread_testcases):
+    while read_testcases.is_set() or unread_testcases:
+        if finished_unread_testcases:
             read_testcase = finished_unread_testcases.pop()
             unread_testcases.remove(read_testcase)
-        elif len(unread_testcases):
+        elif unread_testcases:
             read_testcase = unread_testcases.pop()
         if read_testcase:
             data = ''
@@ -253,13 +255,23 @@
                 "Core-file exists in test temporary directory: %s!" % core_path)
             check_core_path(logger, core_path)
-            logger.debug("Running `file %s':" % core_path)
+            logger.debug("Running 'file %s':" % core_path)
             try:
                 info = check_output(["file", core_path])
                 logger.debug(info)
             except CalledProcessError as e:
-                logger.error("Could not run `file' utility on core-file, "
-                             "rc=%s" % e.returncode)
+                logger.error("Subprocess returned non-zero return code "
+                             "while running `file' utility "
+                             "on core-file: "
+                             "rc=%s", e.returncode)
+            except OSError as e:
+                logger.error("Subprocess returned with OS error while "
+                             "running 'file' utility "
+                             "on core-file: "
+                             "(%s) %s", e.errno, e.strerror)
+            except Exception as e:
+                logger.exception("Unexpected error running `file' utility "
+                                 "on core-file")
 
     if vpp_pid:
         # Copy api post mortem
@@ -272,20 +284,23 @@ def handle_failed_suite(logger, last_test_temp_dir, vpp_pid):
 
 def check_and_handle_core(vpp_binary, tempdir, core_crash_test):
     if is_core_present(tempdir):
-        print('VPP core detected in %s. Last test running was %s' %
-              (tempdir, core_crash_test))
-        print(single_line_delim)
-        spawn_gdb(vpp_binary, get_core_path(tempdir))
-        print(single_line_delim)
+        if debug_core:
+            print('VPP core detected in %s. Last test running was %s' %
+                  (tempdir, core_crash_test))
+            print(single_line_delim)
+            spawn_gdb(vpp_binary, get_core_path(tempdir))
+            print(single_line_delim)
+        elif compress_core:
+            print("Compressing core-file in test directory `%s'" % tempdir)
+            os.system("gzip %s" % get_core_path(tempdir))
 
 
 def handle_cores(failed_testcases):
-    if debug_core:
-        for failed_testcase in failed_testcases:
-            tcs_with_core = failed_testcase.testclasess_with_core
-            if len(tcs_with_core) > 0:
-                for test, vpp_binary, tempdir in tcs_with_core.values():
-                    check_and_handle_core(vpp_binary, tempdir, test)
+    for failed_testcase in failed_testcases:
+        tcs_with_core = failed_testcase.testclasess_with_core
+        if tcs_with_core:
+            for test, vpp_binary, tempdir in tcs_with_core.values():
+                check_and_handle_core(vpp_binary, tempdir, test)
 
 
 def process_finished_testsuite(wrapped_testcase_suite,
@@ -317,7 +332,7 @@ def run_forked(testcase_suites):
     manager = StreamQueueManager()
     manager.start()
     for i in range(concurrent_tests):
-        if len(testcase_suites) > 0:
+        if testcase_suites:
             wrapped_testcase_suite = TestCaseWrapper(testcase_suites.pop(0),
                                                      manager)
             wrapped_testcase_suites.add(wrapped_testcase_suite)
@@ -337,7 +352,7 @@
 
     stop_run = False
     try:
-        while len(wrapped_testcase_suites) > 0:
+        while wrapped_testcase_suites:
             finished_testcase_suites = set()
             for wrapped_testcase_suite in wrapped_testcase_suites:
                 while wrapped_testcase_suite.result_parent_end.poll():
@@ -418,19 +433,30 @@
                             results) or stop_run
 
             for finished_testcase in finished_testcase_suites:
-                finished_testcase.child.join()
+                # Somewhat surprisingly, the join below may
+                # timeout, even if client signaled that
+                # it finished - so we note it just in case.
+                join_start = time.time()
+                finished_testcase.child.join(test_finished_join_timeout)
+                join_end = time.time()
+                if join_end - join_start >= test_finished_join_timeout:
+                    finished_testcase.logger.error(
+                        "Timeout joining finished test: %s (pid %d)" %
+                        (finished_testcase.last_test,
+                         finished_testcase.child.pid))
                 finished_testcase.close_pipes()
                 wrapped_testcase_suites.remove(finished_testcase)
                 finished_unread_testcases.add(finished_testcase)
                 finished_testcase.stdouterr_queue.put(None)
             if stop_run:
-                while len(testcase_suites) > 0:
+                while testcase_suites:
                     results.append(TestResult(testcase_suites.pop(0)))
-            elif len(testcase_suites) > 0:
+            elif testcase_suites:
                 new_testcase = TestCaseWrapper(testcase_suites.pop(0),
                                                manager)
                 wrapped_testcase_suites.add(new_testcase)
                 unread_testcases.add(new_testcase)
+            time.sleep(0.1)
     except Exception:
         for wrapped_testcase_suite in wrapped_testcase_suites:
             wrapped_testcase_suite.child.terminate()
@@ -620,7 +646,7 @@ class AllResults(dict):
             failed_testcase_ids = result[FAIL]
             errored_testcase_ids = result[ERROR]
             old_testcase_name = None
-            if len(failed_testcase_ids) or len(errored_testcase_ids):
+            if failed_testcase_ids:
                 for failed_test_id in failed_testcase_ids:
                     new_testcase_name, test_name = \
                         result.get_testcase_names(failed_test_id)
@@ -630,16 +656,17 @@
                         old_testcase_name = new_testcase_name
                     print('    FAILURE: {} [{}]'.format(
                         colorize(test_name, RED), failed_test_id))
-                for failed_test_id in errored_testcase_ids:
+            if errored_testcase_ids:
+                for errored_test_id in errored_testcase_ids:
                     new_testcase_name, test_name = \
-                        result.get_testcase_names(failed_test_id)
+                        result.get_testcase_names(errored_test_id)
                     if new_testcase_name != old_testcase_name:
                         print('  Testcase name: {}'.format(
                            colorize(new_testcase_name, RED)))
                         old_testcase_name = new_testcase_name
                     print('    ERROR: {} [{}]'.format(
-                        colorize(test_name, RED), failed_test_id))
-        if len(self.testsuites_no_tests_run) > 0:
+                        colorize(test_name, RED), errored_test_id))
+        if self.testsuites_no_tests_run:
             print('TESTCASES WHERE NO TESTS WERE SUCCESSFULLY EXECUTED:')
             tc_classes = set()
             for testsuite in self.testsuites_no_tests_run:
@@ -710,15 +737,28 @@ if __name__ == '__main__':
 
     test_timeout = parse_digit_env("TIMEOUT", 600)  # default = 10 minutes
 
+    test_finished_join_timeout = 15
+
     retries = parse_digit_env("RETRIES", 0)
 
     debug = os.getenv("DEBUG", "n").lower() in ["gdb", "gdbserver"]
     debug_core = os.getenv("DEBUG", "").lower() == "core"
+    compress_core = framework.BoolEnvironmentVariable("CORE_COMPRESS")
 
-    step = os.getenv("STEP", "n").lower() in ("y", "yes", "1")
+    step = framework.BoolEnvironmentVariable("STEP")
+    force_foreground = framework.BoolEnvironmentVariable("FORCE_FOREGROUND")
+
+    run_interactive = debug or step or force_foreground
+
+    try:
+        num_cpus = len(os.sched_getaffinity(0))
+    except AttributeError:
+        num_cpus = multiprocessing.cpu_count()
+
+    shm_free = psutil.disk_usage('/dev/shm').free
 
-    run_interactive = debug or step
+    print('OS reports %s available cpu(s). Free shm: %s' % (
+        num_cpus, "{:,}MB".format(shm_free / (1024 * 1024))))
 
     test_jobs = os.getenv("TEST_JOBS", "1").lower()  # default = 1 process
     if test_jobs == 'auto':
@@ -726,7 +766,6 @@ if __name__ == '__main__':
             concurrent_tests = 1
             print('Interactive mode required, running on one core')
         else:
-            shm_free = psutil.disk_usage('/dev/shm').free
             shm_max_processes = 1
             if shm_free < min_req_shm:
                 raise Exception('Not enough free space in /dev/shm. Required '
@@ -734,14 +773,17 @@ if __name__ == '__main__':
                                 % (min_req_shm >> 20))
             else:
                 extra_shm = shm_free - min_req_shm
-                shm_max_processes += extra_shm / shm_per_process
+                shm_max_processes += extra_shm // shm_per_process
                 concurrent_tests = min(cpu_count(), shm_max_processes)
                 print('Found enough resources to run tests with %s cores'
                       % concurrent_tests)
     elif test_jobs.isdigit():
         concurrent_tests = int(test_jobs)
+        print("Running on %s core(s) as set by 'TEST_JOBS'." %
+              concurrent_tests)
     else:
         concurrent_tests = 1
+        print('Running on one core.')
 
     if run_interactive and concurrent_tests > 1:
         raise NotImplementedError(
@@ -782,34 +824,38 @@ if __name__ == '__main__':
     print("%s out of %s tests match specified filters" % (
         tests_amount, tests_amount + cb.filtered.countTestCases()))
 
-    if not running_extended_tests():
+    if not running_extended_tests:
         print("Not running extended tests (some tests will be skipped)")
 
     attempts = retries + 1
     if attempts > 1:
         print("Perform %s attempts to pass the suite..." % attempts)
 
-    if run_interactive:
+    if run_interactive and suites:
         # don't fork if requiring interactive terminal
+        print('Running tests in foreground in the current process')
+        full_suite = unittest.TestSuite()
+        full_suite.addTests(suites)
         result = VppTestRunner(verbosity=verbose,
                                failfast=failfast,
-                               print_summary=True).run(suites[0])
+                               print_summary=True).run(full_suite)
         was_successful = result.wasSuccessful()
         if not was_successful:
             for test_case_info in result.failed_test_cases_info:
                 handle_failed_suite(test_case_info.logger,
                                     test_case_info.tempdir,
                                     test_case_info.vpp_pid)
-                if debug_core and \
-                        test_case_info in result.core_crash_test_cases_info:
+                if test_case_info in result.core_crash_test_cases_info:
                     check_and_handle_core(test_case_info.vpp_bin_path,
                                           test_case_info.tempdir,
                                           test_case_info.core_crash_test)
 
         sys.exit(not was_successful)
     else:
+        print('Running each VPPTestCase in a separate background process'
+              ' with {} parallel process(es)'.format(concurrent_tests))
         exit_code = 0
-        while len(suites) > 0 and attempts > 0:
+        while suites and attempts > 0:
            results = run_forked(suites)
             exit_code, suites = parse_results(results)
             attempts -= 1
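
Note on the TEST_JOBS=auto sizing in the patch above: the number of parallel test processes is capped by both the CPU count and the free space in /dev/shm. The following is a minimal standalone sketch of that calculation, not part of the patch; the min_req_shm and shm_per_process values are assumed for illustration (run_tests.py defines its own constants elsewhere in the file).

from multiprocessing import cpu_count

import psutil  # same third-party module the test runner itself uses

# Assumed example values only; not taken from the patch.
min_req_shm = 512 * 1024 * 1024      # shm needed before any worker can start
shm_per_process = 64 * 1024 * 1024   # extra shm budgeted per additional worker


def auto_concurrent_tests():
    shm_free = psutil.disk_usage('/dev/shm').free
    if shm_free < min_req_shm:
        raise Exception('Not enough free space in /dev/shm. Required '
                        'free space is at least %sM.' % (min_req_shm >> 20))
    # One process always fits; each extra one costs shm_per_process.
    shm_max_processes = 1 + (shm_free - min_req_shm) // shm_per_process
    return min(cpu_count(), shm_max_processes)


if __name__ == '__main__':
    print('Would run tests with %s parallel process(es)' %
          auto_concurrent_tests())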