X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=test%2Frun_tests.py;h=66e0ee4db398ac2cbedecb5767239d9f40a6962f;hb=62af9bb645ac2fdb9777b823d55f5069258f736b;hp=5df37efba6bbca85ea1001901eca81bb8b5e365f;hpb=d9b0c6fbf7aa5bd9af84264105b39c82028a4a29;p=vpp.git diff --git a/test/run_tests.py b/test/run_tests.py index 5df37efba6b..66e0ee4db39 100644 --- a/test/run_tests.py +++ b/test/run_tests.py @@ -5,7 +5,6 @@ import shutil import os import fnmatch import unittest -import argparse import time import threading import traceback @@ -14,21 +13,18 @@ import re from multiprocessing import Process, Pipe, get_context from multiprocessing.queues import Queue from multiprocessing.managers import BaseManager -import framework from config import config, num_cpus, available_cpus, max_vpp_cpus -from framework import ( +from vpp_papi import VPPApiJSONFiles +from asfframework import ( VppTestRunner, - VppTestCase, get_testcase_doc_name, get_test_description, - PASS, - FAIL, - ERROR, - SKIP, - TEST_RUN, - SKIP_CPU_SHORTAGE, + get_failed_testcase_linkname, + get_testcase_dirname, ) -from debug import spawn_gdb, start_vpp_in_gdb +from framework import VppTestCase +from test_result_code import TestResultCode +from debug import spawn_gdb from log import ( get_parallel_logger, double_line_delim, @@ -72,12 +68,8 @@ StreamQueueManager.register("StreamQueue", StreamQueue) class TestResult(dict): def __init__(self, testcase_suite, testcases_by_id=None): super(TestResult, self).__init__() - self[PASS] = [] - self[FAIL] = [] - self[ERROR] = [] - self[SKIP] = [] - self[SKIP_CPU_SHORTAGE] = [] - self[TEST_RUN] = [] + for trc in list(TestResultCode): + self[trc] = [] self.crashed = False self.testcase_suite = testcase_suite self.testcases = [testcase for testcase in testcase_suite] @@ -85,13 +77,19 @@ class TestResult(dict): def was_successful(self): return ( - 0 == len(self[FAIL]) == len(self[ERROR]) - and len(self[PASS] + self[SKIP] + self[SKIP_CPU_SHORTAGE]) + 0 + == len(self[TestResultCode.FAIL]) + == len(self[TestResultCode.ERROR]) + == len(self[TestResultCode.UNEXPECTED_PASS]) + and len(self[TestResultCode.PASS]) + + len(self[TestResultCode.SKIP]) + + len(self[TestResultCode.SKIP_CPU_SHORTAGE]) + + len(self[TestResultCode.EXPECTED_FAIL]) == self.testcase_suite.countTestCases() ) def no_tests_run(self): - return 0 == len(self[TEST_RUN]) + return 0 == len(self[TestResultCode.TEST_RUN]) def process_result(self, test_id, result): self[result].append(test_id) @@ -100,7 +98,13 @@ class TestResult(dict): rerun_ids = set([]) for testcase in self.testcase_suite: tc_id = testcase.id() - if tc_id not in self[PASS] + self[SKIP] + self[SKIP_CPU_SHORTAGE]: + if ( + tc_id + not in self[TestResultCode.PASS] + + self[TestResultCode.SKIP] + + self[TestResultCode.SKIP_CPU_SHORTAGE] + + self[TestResultCode.EXPECTED_FAIL] + ): rerun_ids.add(tc_id) if rerun_ids: return suite_from_failed(self.testcase_suite, rerun_ids) @@ -269,7 +273,7 @@ def handle_failed_suite(logger, last_test_temp_dir, vpp_pid, vpp_binary): if last_test_temp_dir: # Need to create link in case of a timeout or core dump without failure lttd = os.path.basename(last_test_temp_dir) - link_path = "%s%s-FAILED" % (config.failed_dir, lttd) + link_path = os.path.join(config.failed_dir, f"{lttd}-FAILED") if not os.path.exists(link_path): os.symlink(last_test_temp_dir, link_path) logger.error( @@ -432,7 +436,7 @@ def run_forked(testcase_suites): stop_run = False try: - while wrapped_testcase_suites: + while wrapped_testcase_suites or testcase_suites: 
finished_testcase_suites = set() for wrapped_testcase_suite in wrapped_testcase_suites: while wrapped_testcase_suite.result_parent_end.poll(): @@ -520,7 +524,7 @@ def run_forked(testcase_suites): pass wrapped_testcase_suite.result.crashed = True wrapped_testcase_suite.result.process_result( - wrapped_testcase_suite.last_test_id, ERROR + wrapped_testcase_suite.last_test_id, TestResultCode.ERROR ) stop_run = ( process_finished_testsuite( @@ -553,14 +557,16 @@ def run_forked(testcase_suites): while testcase_suites: results.append(TestResult(testcase_suites.pop(0))) elif testcase_suites: - a_suite = testcase_suites.pop(0) + a_suite = testcase_suites[0] while a_suite and a_suite.is_tagged_run_solo: + testcase_suites.pop(0) solo_testcase_suites.append(a_suite) if testcase_suites: - a_suite = testcase_suites.pop(0) + a_suite = testcase_suites[0] else: a_suite = None if a_suite and can_run_suite(a_suite): + testcase_suites.pop(0) run_suite(a_suite) if solo_testcase_suites and tests_running == 0: a_suite = solo_testcase_suites.pop(0) @@ -633,7 +639,7 @@ def parse_test_filter(test_filter): if "." in f: parts = f.split(".") if len(parts) > 3: - raise Exception("Unrecognized %s option: %s" % (test_option, f)) + raise Exception(f"Invalid test filter: {test_filter}") if len(parts) > 2: if parts[2] not in ("*", ""): filter_func_name = parts[2] @@ -679,21 +685,40 @@ def filter_tests(tests, filter_cb): class FilterByTestOption: - def __init__(self, filter_file_name, filter_class_name, filter_func_name): - self.filter_file_name = filter_file_name - self.filter_class_name = filter_class_name - self.filter_func_name = filter_func_name + def __init__(self, filters): + self.filters = filters def __call__(self, file_name, class_name, func_name): - if self.filter_file_name: - fn_match = fnmatch.fnmatch(file_name, self.filter_file_name) - if not fn_match: + def test_one( + filter_file_name, + filter_class_name, + filter_func_name, + file_name, + class_name, + func_name, + ): + if filter_file_name: + fn_match = fnmatch.fnmatch(file_name, filter_file_name) + if not fn_match: + return False + if filter_class_name and class_name != filter_class_name: return False - if self.filter_class_name and class_name != self.filter_class_name: - return False - if self.filter_func_name and func_name != self.filter_func_name: - return False - return True + if filter_func_name and func_name != filter_func_name: + return False + return True + + for filter_file_name, filter_class_name, filter_func_name in self.filters: + if test_one( + filter_file_name, + filter_class_name, + filter_func_name, + file_name, + class_name, + func_name, + ): + return True + + return False class FilterByClassList: @@ -716,20 +741,15 @@ class AllResults(dict): super(AllResults, self).__init__() self.all_testcases = 0 self.results_per_suite = [] - self[PASS] = 0 - self[FAIL] = 0 - self[ERROR] = 0 - self[SKIP] = 0 - self[SKIP_CPU_SHORTAGE] = 0 - self[TEST_RUN] = 0 + for trc in list(TestResultCode): + self[trc] = 0 self.rerun = [] self.testsuites_no_tests_run = [] def add_results(self, result): self.results_per_suite.append(result) - result_types = [PASS, FAIL, ERROR, SKIP, TEST_RUN, SKIP_CPU_SHORTAGE] - for result_type in result_types: - self[result_type] += len(result[result_type]) + for trc in list(TestResultCode): + self[trc] += len(result[trc]) def add_result(self, result): retval = 0 @@ -766,63 +786,72 @@ class AllResults(dict): indent_results( [ f"Scheduled tests: {self.all_testcases}", - f"Executed tests: {self[TEST_RUN]}", - f"Passed tests: 
{colorize(self[PASS], GREEN)}", - f"Skipped tests: {colorize(self[SKIP], YELLOW)}" - if self[SKIP] - else None, - f"Not Executed tests: {colorize(self.not_executed, RED)}" - if self.not_executed - else None, - f"Failures: {colorize(self[FAIL], RED)}" if self[FAIL] else None, - f"Errors: {colorize(self[ERROR], RED)}" if self[ERROR] else None, - "Tests skipped due to lack of CPUS: " - f"{colorize(self[SKIP_CPU_SHORTAGE], YELLOW)}" - if self[SKIP_CPU_SHORTAGE] - else None, + f"Executed tests: {self[TestResultCode.TEST_RUN]}", + f"Passed tests: {colorize(self[TestResultCode.PASS], GREEN)}", + ( + f"Expected failures: {colorize(self[TestResultCode.EXPECTED_FAIL], GREEN)}" + if self[TestResultCode.EXPECTED_FAIL] + else None + ), + ( + f"Skipped tests: {colorize(self[TestResultCode.SKIP], YELLOW)}" + if self[TestResultCode.SKIP] + else None + ), + ( + f"Not Executed tests: {colorize(self.not_executed, RED)}" + if self.not_executed + else None + ), + ( + f"Failures: {colorize(self[TestResultCode.FAIL], RED)}" + if self[TestResultCode.FAIL] + else None + ), + ( + f"Unexpected passes: {colorize(self[TestResultCode.UNEXPECTED_PASS], RED)}" + if self[TestResultCode.UNEXPECTED_PASS] + else None + ), + ( + f"Errors: {colorize(self[TestResultCode.ERROR], RED)}" + if self[TestResultCode.ERROR] + else None + ), + ( + "Tests skipped due to lack of CPUS: " + f"{colorize(self[TestResultCode.SKIP_CPU_SHORTAGE], YELLOW)}" + if self[TestResultCode.SKIP_CPU_SHORTAGE] + else None + ), ] ) if self.all_failed > 0: print("FAILURES AND ERRORS IN TESTS:") for result in self.results_per_suite: - failed_testcase_ids = result[FAIL] - errored_testcase_ids = result[ERROR] old_testcase_name = None - if failed_testcase_ids: - for failed_test_id in failed_testcase_ids: + for tr_code, headline in ( + (TestResultCode.FAIL, "FAILURE"), + (TestResultCode.ERROR, "ERROR"), + (TestResultCode.UNEXPECTED_PASS, "UNEXPECTED PASS"), + ): + if not result[tr_code]: + continue + + for failed_test_id in result[tr_code]: new_testcase_name, test_name = result.get_testcase_names( failed_test_id ) if new_testcase_name != old_testcase_name: print( - " Testcase name: {}".format( - colorize(new_testcase_name, RED) - ) + f" Testcase name: {colorize(new_testcase_name, RED)}" ) old_testcase_name = new_testcase_name print( - " FAILURE: {} [{}]".format( - colorize(test_name, RED), failed_test_id - ) - ) - if errored_testcase_ids: - for errored_test_id in errored_testcase_ids: - new_testcase_name, test_name = result.get_testcase_names( - errored_test_id - ) - if new_testcase_name != old_testcase_name: - print( - " Testcase name: {}".format( - colorize(new_testcase_name, RED) - ) - ) - old_testcase_name = new_testcase_name - print( - " ERROR: {} [{}]".format( - colorize(test_name, RED), errored_test_id - ) + f" {headline}: {colorize(test_name, RED)} [{failed_test_id}]" ) + if self.testsuites_no_tests_run: print("TESTCASES WHERE NO TESTS WERE SUCCESSFULLY EXECUTED:") tc_classes = set() @@ -832,7 +861,7 @@ class AllResults(dict): for tc_class in tc_classes: print(" {}".format(colorize(tc_class, RED))) - if self[SKIP_CPU_SHORTAGE]: + if self[TestResultCode.SKIP_CPU_SHORTAGE]: print() print( colorize( @@ -846,11 +875,15 @@ class AllResults(dict): @property def not_executed(self): - return self.all_testcases - self[TEST_RUN] + return self.all_testcases - self[TestResultCode.TEST_RUN] @property def all_failed(self): - return self[FAIL] + self[ERROR] + return ( + self[TestResultCode.FAIL] + + self[TestResultCode.ERROR] + + self[TestResultCode.UNEXPECTED_PASS] + ) 
def parse_results(results): @@ -886,9 +919,11 @@ def parse_results(results): if __name__ == "__main__": - print(f"Config is: {config}") + if config.api_preload: + VPPApiJSONFiles.load_api(apidir=config.extern_apidir + [config.vpp_install_dir]) + if config.sanity: print("Running sanity test case.") try: @@ -940,20 +975,22 @@ if __name__ == "__main__": descriptions = True print("Running tests using custom test runner.") - filter_file, filter_class, filter_func = parse_test_filter(config.filter) + filters = [(parse_test_filter(f)) for f in config.filter.split(",")] print( - "Selected filters: file=%s, class=%s, function=%s" - % (filter_file, filter_class, filter_func) + "Selected filters: ", + "|".join( + f"file={filter_file}, class={filter_class}, function={filter_func}" + for filter_file, filter_class, filter_func in filters + ), ) - filter_cb = FilterByTestOption(filter_file, filter_class, filter_func) + filter_cb = FilterByTestOption(filters) - ignore_path = config.venv_dir cb = SplitToSuitesCallback(filter_cb) for d in config.test_src_dir: print("Adding tests from directory tree %s" % d) - discover_tests(d, cb, ignore_path) + discover_tests(d, cb) # suites are not hashable, need to use list suites = [] @@ -1040,6 +1077,13 @@ if __name__ == "__main__": ) exit_code = 0 while suites and attempts > 0: + for suite in suites: + failed_link = get_failed_testcase_linkname( + config.failed_dir, + f"{get_testcase_dirname(suite._tests[0].__class__.__name__)}", + ) + if os.path.islink(failed_link): + os.unlink(failed_link) results = run_forked(suites) exit_code, suites = parse_results(results) attempts -= 1
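
Notes on the patch follow.

The core of this change replaces the loose PASS/FAIL/ERROR/SKIP/TEST_RUN/SKIP_CPU_SHORTAGE constants previously imported from framework with a TestResultCode enum from the new test_result_code module, and adds two codes, EXPECTED_FAIL and UNEXPECTED_PASS, apparently for unittest-style expected failures. The module itself is not part of this diff; the sketch below is a hypothetical reconstruction consistent with how the patch uses it (member names are taken from the patch, ordering and values are assumptions):

# Hypothetical reconstruction of test_result_code.py; the real module
# may order or value the members differently.
from enum import Enum, auto


class TestResultCode(Enum):
    PASS = auto()
    FAIL = auto()
    ERROR = auto()
    SKIP = auto()
    TEST_RUN = auto()
    SKIP_CPU_SHORTAGE = auto()
    EXPECTED_FAIL = auto()
    UNEXPECTED_PASS = auto()

Because TestResult and AllResults now initialize one bucket per member with "for trc in list(TestResultCode)", a future result code gets its own counter without further changes to either class. Note that was_successful() counts UNEXPECTED_PASS with the failures and EXPECTED_FAIL with the successes, which matches how the summary colorizes them (red and green respectively).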
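Test selection also changes: the filter string may now contain several comma-separated filters, each still in the file.class.function form handled by parse_test_filter(), and FilterByTestOption takes the resulting list of (file, class, function) tuples. A test is selected if it matches any filter, and a filter matches only when all of its non-empty parts do. A self-contained restatement of that matching, with a helper name and filter values that are illustrative, not from the patch:

import fnmatch


def matches_any(filters, file_name, class_name, func_name):
    # OR across filters; AND across the parts of each filter.
    # An empty/None part acts as a wildcard, mirroring FilterByTestOption.
    for f_file, f_class, f_func in filters:
        if f_file and not fnmatch.fnmatch(file_name, f_file):
            continue
        if f_class and class_name != f_class:
            continue
        if f_func and func_name != f_func:
            continue
        return True
    return False


# Hypothetical example: run everything in one file plus one specific test.
filters = [("test_ip4.py", None, None), ("test_ip6.py", "TestIP6", "test_ping")]
assert matches_any(filters, "test_ip6.py", "TestIP6", "test_ping")
assert not matches_any(filters, "test_ip6.py", "TestIP6", "test_mtu")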
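Finally, the retry loop in __main__ now clears stale failure links before each attempt: for every suite about to run, the corresponding "<dirname>-FAILED" symlink under config.failed_dir is unlinked if present, so the links always point at the most recent attempt rather than at directories left over from a previous round. get_failed_testcase_linkname() and get_testcase_dirname() are imported from asfframework and not shown in this diff; the sketch below assumes plausible shapes for both (the "-FAILED" suffix is confirmed by handle_failed_suite() above, the directory-name prefix is an assumption):

import os


def get_testcase_dirname(testcase_class_name):
    # Assumed shape; the real asfframework helper may differ.
    return f"vpp-unittest-{testcase_class_name}"


def get_failed_testcase_linkname(failed_dir, testcase_dirname):
    # Assumed shape, matching the "%s-FAILED" convention used above.
    return os.path.join(failed_dir, f"{testcase_dirname}-FAILED")


def clear_stale_failed_links(failed_dir, suites):
    # Equivalent of the cleanup added at the top of the retry loop.
    for suite in suites:
        link = get_failed_testcase_linkname(
            failed_dir, get_testcase_dirname(suite._tests[0].__class__.__name__)
        )
        if os.path.islink(link):
            os.unlink(link)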