import os
import fnmatch
import unittest
-import argparse
import time
import threading
import traceback
from multiprocessing import Process, Pipe, get_context
from multiprocessing.queues import Queue
from multiprocessing.managers import BaseManager
-import framework
from config import config, num_cpus, available_cpus, max_vpp_cpus
from framework import (
VppTestRunner,
VppTestCase,
get_testcase_doc_name,
get_test_description,
- PASS,
- FAIL,
- ERROR,
- SKIP,
- TEST_RUN,
- SKIP_CPU_SHORTAGE,
)
-from debug import spawn_gdb, start_vpp_in_gdb
+from test_result_code import TestResultCode
+from debug import spawn_gdb
from log import (
get_parallel_logger,
double_line_delim,
class TestResult(dict):
def __init__(self, testcase_suite, testcases_by_id=None):
super(TestResult, self).__init__()
- self[PASS] = []
- self[FAIL] = []
- self[ERROR] = []
- self[SKIP] = []
- self[SKIP_CPU_SHORTAGE] = []
- self[TEST_RUN] = []
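+ # seed an empty result bucket for every TestResultCode member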
+ for trc in list(TestResultCode):
+ self[trc] = []
self.crashed = False
self.testcase_suite = testcase_suite
self.testcases = [testcase for testcase in testcase_suite]
def was_successful(self):
return (
- 0 == len(self[FAIL]) == len(self[ERROR])
- and len(self[PASS] + self[SKIP] + self[SKIP_CPU_SHORTAGE])
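+ # success: no fails/errors/unexpected passes, and every test is
+ # accounted for by a benign result (pass, skip, or expected fail)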
+ 0
+ == len(self[TestResultCode.FAIL])
+ == len(self[TestResultCode.ERROR])
+ == len(self[TestResultCode.UNEXPECTED_PASS])
+ and len(self[TestResultCode.PASS])
+ + len(self[TestResultCode.SKIP])
+ + len(self[TestResultCode.SKIP_CPU_SHORTAGE])
+ + len(self[TestResultCode.EXPECTED_FAIL])
== self.testcase_suite.countTestCases()
)
def no_tests_run(self):
- return 0 == len(self[TEST_RUN])
+ return 0 == len(self[TestResultCode.TEST_RUN])
def process_result(self, test_id, result):
self[result].append(test_id)
rerun_ids = set([])
for testcase in self.testcase_suite:
tc_id = testcase.id()
- if tc_id not in self[PASS] + self[SKIP] + self[SKIP_CPU_SHORTAGE]:
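+ # a test is re-run unless it finished with a benign result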
+ if (
+ tc_id
+ not in self[TestResultCode.PASS]
+ + self[TestResultCode.SKIP]
+ + self[TestResultCode.SKIP_CPU_SHORTAGE]
+ + self[TestResultCode.EXPECTED_FAIL]
+ ):
rerun_ids.add(tc_id)
if rerun_ids:
return suite_from_failed(self.testcase_suite, rerun_ids)
if last_test_temp_dir:
# Need to create link in case of a timeout or core dump without failure
lttd = os.path.basename(last_test_temp_dir)
- link_path = "%s%s-FAILED" % (config.failed_dir, lttd)
+ link_path = os.path.join(config.failed_dir, f"{lttd}-FAILED")
if not os.path.exists(link_path):
os.symlink(last_test_temp_dir, link_path)
logger.error(
stop_run = False
try:
- while wrapped_testcase_suites:
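+ # keep scheduling while suites are still running or still queued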
+ while wrapped_testcase_suites or testcase_suites:
finished_testcase_suites = set()
for wrapped_testcase_suite in wrapped_testcase_suites:
while wrapped_testcase_suite.result_parent_end.poll():
wrapped_testcase_suite.result.process_result(
*wrapped_testcase_suite.result_parent_end.recv()
)
wrapped_testcase_suite.result.crashed = True
wrapped_testcase_suite.result.process_result(
- wrapped_testcase_suite.last_test_id, ERROR
+ wrapped_testcase_suite.last_test_id, TestResultCode.ERROR
)
stop_run = (
process_finished_testsuite(
while testcase_suites:
results.append(TestResult(testcase_suites.pop(0)))
elif testcase_suites:
- a_suite = testcase_suites.pop(0)
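+ # peek only; the suite is popped once it is actually dispatched,
+ # so a suite that cannot run yet stays queued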
+ a_suite = testcase_suites[0]
while a_suite and a_suite.is_tagged_run_solo:
+ testcase_suites.pop(0)
solo_testcase_suites.append(a_suite)
if testcase_suites:
- a_suite = testcase_suites.pop(0)
+ a_suite = testcase_suites[0]
else:
a_suite = None
if a_suite and can_run_suite(a_suite):
+ testcase_suites.pop(0)
run_suite(a_suite)
if solo_testcase_suites and tests_running == 0:
a_suite = solo_testcase_suites.pop(0)
if "." in f:
parts = f.split(".")
if len(parts) > 3:
- raise Exception("Unrecognized %s option: %s" % (test_option, f))
+ raise Exception(f"Invalid test filter: {test_filter}")
if len(parts) > 2:
if parts[2] not in ("*", ""):
filter_func_name = parts[2]
class FilterByTestOption:
- def __init__(self, filter_file_name, filter_class_name, filter_func_name):
- self.filter_file_name = filter_file_name
- self.filter_class_name = filter_class_name
- self.filter_func_name = filter_func_name
+ def __init__(self, filters):
+ self.filters = filters
def __call__(self, file_name, class_name, func_name):
- if self.filter_file_name:
- fn_match = fnmatch.fnmatch(file_name, self.filter_file_name)
- if not fn_match:
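+ # check a single (file, class, func) filter triple against the test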
+ def test_one(
+ filter_file_name,
+ filter_class_name,
+ filter_func_name,
+ file_name,
+ class_name,
+ func_name,
+ ):
+ if filter_file_name:
+ fn_match = fnmatch.fnmatch(file_name, filter_file_name)
+ if not fn_match:
+ return False
+ if filter_class_name and class_name != filter_class_name:
return False
- if self.filter_class_name and class_name != self.filter_class_name:
- return False
- if self.filter_func_name and func_name != self.filter_func_name:
- return False
- return True
+ if filter_func_name and func_name != filter_func_name:
+ return False
+ return True
+
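+ # the test is selected if any of the configured filters matches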
+ for filter_file_name, filter_class_name, filter_func_name in self.filters:
+ if test_one(
+ filter_file_name,
+ filter_class_name,
+ filter_func_name,
+ file_name,
+ class_name,
+ func_name,
+ ):
+ return True
+
+ return False
class FilterByClassList:
super(AllResults, self).__init__()
self.all_testcases = 0
self.results_per_suite = []
- self[PASS] = 0
- self[FAIL] = 0
- self[ERROR] = 0
- self[SKIP] = 0
- self[SKIP_CPU_SHORTAGE] = 0
- self[TEST_RUN] = 0
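+ # start a zero counter for every result code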
+ for trc in list(TestResultCode):
+ self[trc] = 0
self.rerun = []
self.testsuites_no_tests_run = []
def add_results(self, result):
self.results_per_suite.append(result)
- result_types = [PASS, FAIL, ERROR, SKIP, TEST_RUN, SKIP_CPU_SHORTAGE]
- for result_type in result_types:
- self[result_type] += len(result[result_type])
+ for trc in list(TestResultCode):
+ self[trc] += len(result[trc])
def add_result(self, result):
retval = 0
indent_results(
[
f"Scheduled tests: {self.all_testcases}",
- f"Executed tests: {self[TEST_RUN]}",
- f"Passed tests: {colorize(self[PASS], GREEN)}",
- f"Skipped tests: {colorize(self[SKIP], YELLOW)}"
- if self[SKIP]
+ f"Executed tests: {self[TestResultCode.TEST_RUN]}",
+ f"Passed tests: {colorize(self[TestResultCode.PASS], GREEN)}",
+ f"Expected failures: {colorize(self[TestResultCode.EXPECTED_FAIL], GREEN)}"
+ if self[TestResultCode.EXPECTED_FAIL]
+ else None,
+ f"Skipped tests: {colorize(self[TestResultCode.SKIP], YELLOW)}"
+ if self[TestResultCode.SKIP]
else None,
f"Not Executed tests: {colorize(self.not_executed, RED)}"
if self.not_executed
else None,
- f"Failures: {colorize(self[FAIL], RED)}" if self[FAIL] else None,
- f"Errors: {colorize(self[ERROR], RED)}" if self[ERROR] else None,
+ f"Failures: {colorize(self[TestResultCode.FAIL], RED)}"
+ if self[TestResultCode.FAIL]
+ else None,
+ f"Unexpected passes: {colorize(self[TestResultCode.UNEXPECTED_PASS], RED)}"
+ if self[TestResultCode.UNEXPECTED_PASS]
+ else None,
+ f"Errors: {colorize(self[TestResultCode.ERROR], RED)}"
+ if self[TestResultCode.ERROR]
+ else None,
"Tests skipped due to lack of CPUS: "
- f"{colorize(self[SKIP_CPU_SHORTAGE], YELLOW)}"
- if self[SKIP_CPU_SHORTAGE]
+ f"{colorize(self[TestResultCode.SKIP_CPU_SHORTAGE], YELLOW)}"
+ if self[TestResultCode.SKIP_CPU_SHORTAGE]
else None,
]
)
if self.all_failed > 0:
print("FAILURES AND ERRORS IN TESTS:")
for result in self.results_per_suite:
- failed_testcase_ids = result[FAIL]
- errored_testcase_ids = result[ERROR]
old_testcase_name = None
- if failed_testcase_ids:
- for failed_test_id in failed_testcase_ids:
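+ # one loop reports all failure-like outcomes with a matching headline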
+ for tr_code, headline in (
+ (TestResultCode.FAIL, "FAILURE"),
+ (TestResultCode.ERROR, "ERROR"),
+ (TestResultCode.UNEXPECTED_PASS, "UNEXPECTED PASS"),
+ ):
+ if not result[tr_code]:
+ continue
+
+ for failed_test_id in result[tr_code]:
new_testcase_name, test_name = result.get_testcase_names(
failed_test_id
)
if new_testcase_name != old_testcase_name:
print(
- " Testcase name: {}".format(
- colorize(new_testcase_name, RED)
- )
+ f" Testcase name: {colorize(new_testcase_name, RED)}"
)
old_testcase_name = new_testcase_name
print(
- " FAILURE: {} [{}]".format(
- colorize(test_name, RED), failed_test_id
- )
- )
- if errored_testcase_ids:
- for errored_test_id in errored_testcase_ids:
- new_testcase_name, test_name = result.get_testcase_names(
- errored_test_id
- )
- if new_testcase_name != old_testcase_name:
- print(
- " Testcase name: {}".format(
- colorize(new_testcase_name, RED)
- )
- )
- old_testcase_name = new_testcase_name
- print(
- " ERROR: {} [{}]".format(
- colorize(test_name, RED), errored_test_id
- )
+ f" {headline}: {colorize(test_name, RED)} [{failed_test_id}]"
)
+
if self.testsuites_no_tests_run:
print("TESTCASES WHERE NO TESTS WERE SUCCESSFULLY EXECUTED:")
tc_classes = set()
for tc_class in tc_classes:
print(" {}".format(colorize(tc_class, RED)))
- if self[SKIP_CPU_SHORTAGE]:
+ if self[TestResultCode.SKIP_CPU_SHORTAGE]:
print()
print(
colorize(
@property
def not_executed(self):
- return self.all_testcases - self[TEST_RUN]
+ return self.all_testcases - self[TestResultCode.TEST_RUN]
@property
def all_failed(self):
- return self[FAIL] + self[ERROR]
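+ # unexpected passes count as failures for the overall verdict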
+ return (
+ self[TestResultCode.FAIL]
+ + self[TestResultCode.ERROR]
+ + self[TestResultCode.UNEXPECTED_PASS]
+ )
def parse_results(results):
if __name__ == "__main__":
-
print(f"Config is: {config}")
if config.sanity:
descriptions = True
print("Running tests using custom test runner.")
- filter_file, filter_class, filter_func = parse_test_filter(config.filter)
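+ # config.filter may hold several comma-separated filter expressions,
+ # e.g. "test_ip4.*.*,test_mpls" (illustrative values; each entry is a
+ # file.class.function triple, with fnmatch wildcards on the file part)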
+ filters = [parse_test_filter(f) for f in config.filter.split(",")]
print(
- "Selected filters: file=%s, class=%s, function=%s"
- % (filter_file, filter_class, filter_func)
+ "Selected filters: ",
+ "|".join(
+ f"file={filter_file}, class={filter_class}, function={filter_func}"
+ for filter_file, filter_class, filter_func in filters
+ ),
)
- filter_cb = FilterByTestOption(filter_file, filter_class, filter_func)
+ filter_cb = FilterByTestOption(filters)
- ignore_path = config.venv_dir
cb = SplitToSuitesCallback(filter_cb)
for d in config.test_src_dir:
print("Adding tests from directory tree %s" % d)
- discover_tests(d, cb, ignore_path)
+ discover_tests(d, cb)
# suites are not hashable, need to use list
suites = []