From b9d32674eb15ab62109c608147f44f5dd5e8c77b Mon Sep 17 00:00:00 2001 From: Sam Clegg Date: Wed, 25 Feb 2026 16:16:08 -0800 Subject: [PATCH] Combine all tests into a single test suite. NFC This allows all tests to be run in parallel. Before: ``` Test suites: ['test_core', 'test_jslib', 'test_other'] Running test_core: (88 tests) Using 88 parallel test processes [88/88] test_asan_vector (test_core.core0.test_asan_vector) ... ok ---------------------------------------------------------------------- Ran 88 tests in 3.296s OK (skipped=5) Running test_jslib: (54 tests) Using 54 parallel test processes [54/54] test_jslib_aliases_closure_wasm64 (test_jslib.jslib.test_jslib_aliases_closure_wasm64) ... ok ---------------------------------------------------------------------- Ran 54 tests in 5.287s OK Running test_other: (41 tests) Using 41 parallel test processes [41/41] test_abspaths (test_other.other.test_abspaths) ... ok ---------------------------------------------------------------------- Ran 41 tests in 5.210s OK Total core time: 228.881s. Wallclock time: 13.795s. Parallelization: 16.59x. ==================== TEST SUMMARY test_core: 88 run, 0 errors, 0 failures, 5 skipped test_jslib: 54 run, 0 errors, 0 failures, 0 skipped test_other: 41 run, 0 errors, 0 failures, 0 skipped ``` After: ``` $ ./test/runner jslib other.test_a* core0.test_a* --skip-slow Running 183 tests Using 128 parallel test processes [183/183] test_abspaths (test_other.other.test_abspaths) ... ok ---------------------------------------------------------------------- Ran 183 tests in 7.459s OK (skipped=5) Total core time: 301.490s. Wallclock time: 7.459s. Parallelization: 40.42x. ``` Note the wall clock time is lower since we ran all the tests in parallel. If you try to mix parallel and non-parallel test modules you now get an error. 
e.g.: ``` $ ./test/runner core0 benchmark runner: error: attempt to mix parallel and non-parallel test modules ``` --- .circleci/config.yml | 7 ++-- test/runner.py | 82 +++++++++++++++++++++----------------------- test/test_sanity.py | 3 +- 3 files changed, 45 insertions(+), 47 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index f58cd3c37c352..1f0dcd013efca 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1097,9 +1097,12 @@ jobs: - run-tests-linux: # some native-dependent tests fail because of the lack of native # headers on emsdk-bundled clang + test_targets: "other jslib skip:other.test_native_link_error_message" + - run-tests-linux: # Include a single test from test_benchmark.py to ensure it's always - # kept functional. - test_targets: "other jslib benchmark.test_primes skip:other.test_native_link_error_message" + # kept functional. Must be run separately from above because we cannot + # run non-parallel tests in the same run as parallel tests. 
+ test_targets: "benchmark.test_primes" test-browser-chrome: executor: ubuntu-lts environment: diff --git a/test/runner.py b/test/runner.py index ff3419d96eaa1..7fa8f4cfa4184 100755 --- a/test/runner.py +++ b/test/runner.py @@ -368,13 +368,30 @@ def sort_tests_failing_and_slowest_first_comparator(x, y): return sort_tests_failing_and_slowest_first_comparator -def load_test_suites(args, modules, options): +def use_parallel_suite(module): + suite_supported = module.__name__ not in ('test_sanity', 'test_benchmark', 'test_sockets', 'test_interactive', 'test_stress') + if not common.EMTEST_SAVE_DIR and not shared.DEBUG: + has_multiple_cores = parallel_testsuite.num_cores() > 1 + if suite_supported and has_multiple_cores: + return True + return False + + +def create_test_suite(is_parallel, options): + if is_parallel: + return parallel_testsuite.ParallelTestSuite(options) + else: + return unittest.TestSuite() + + +def load_test_suite(args, modules, options): found_start = not options.start_at loader = unittest.TestLoader() error_on_legacy_suite_names(args) unmatched_test_names = set(args) - suites = [] + suite = None + using_parallel_suite = False total_tests = 0 for m in modules: @@ -394,7 +411,15 @@ def load_test_suites(args, modules, options): loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m) tests = flattened_tests(loaded_tests) - suite = suite_for_module(m, tests, options) + is_parallel_module = use_parallel_suite(m) + if not suite: + # The first module we encounter dictates whether we use parallel test suite + suite = create_test_suite(is_parallel_module, options) + using_parallel_suite = is_parallel_module + else: + # All the following modules must match in their support for the parallel runner. 
+ if is_parallel_module != using_parallel_suite: + utils.exit_with_error(f'attempt to mix parallel and non-parallel test modules ({m.__name__})') if options.failing_and_slow_first: tests = sorted(tests, key=cmp_to_key(create_test_run_sorter(options.max_failures < len(tests) / 2))) for test in tests: @@ -407,10 +432,9 @@ def load_test_suites(args, modules, options): for _x in range(options.repeat): total_tests += 1 suite.addTest(test) - suites.append((m.__name__, suite)) if not found_start: utils.exit_with_error(f'unable to find --start-at test: {options.start_at}') - return suites, unmatched_test_names + return suite, unmatched_test_names def flattened_tests(loaded_tests): @@ -420,24 +444,8 @@ def flattened_tests(loaded_tests): return tests -def suite_for_module(module, tests, options): - suite_supported = module.__name__ not in ('test_sanity', 'test_benchmark', 'test_sockets', 'test_interactive', 'test_stress') - if not common.EMTEST_SAVE_DIR and not shared.DEBUG: - has_multiple_tests = len(tests) > 1 - has_multiple_cores = parallel_testsuite.num_cores() > 1 - if suite_supported and has_multiple_tests and has_multiple_cores: - return parallel_testsuite.ParallelTestSuite(options) - return unittest.TestSuite() - - -def run_tests(options, suites): - resultMessages = [] - num_failures = 0 - - if len(suites) > 1: - print('Test suites:', [s[0] for s in suites]) +def run_tests(options, suite): # Run the discovered tests - if os.getenv('CI'): # output fd must remain open until after testRunner.run() below output = open('out/test-results.xml', 'wb') @@ -456,27 +464,15 @@ def run_tests(options, suites): print('using verbose test runner (verbose output requested)') testRunner = ColorTextRunner(failfast=options.failfast) - total_core_time = 0 run_start_time = time.perf_counter() - for mod_name, suite in suites: - errlog('Running %s: (%s tests)' % (mod_name, suite.countTestCases())) - res = testRunner.run(suite) - msg = ('%s: %s run, %s errors, %s failures, %s skipped' % - 
(mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped))) - num_failures += len(res.errors) + len(res.failures) + len(res.unexpectedSuccesses) - resultMessages.append(msg) - if hasattr(res, 'core_time'): - total_core_time += res.core_time - total_run_time = time.perf_counter() - run_start_time - if total_core_time > 0: - errlog('Total core time: %.3fs. Wallclock time: %.3fs. Parallelization: %.2fx.' % (total_core_time, total_run_time, total_core_time / total_run_time)) - if len(resultMessages) > 1: - errlog('====================') - errlog() - errlog('TEST SUMMARY') - for msg in resultMessages: - errlog(' ' + msg) + errlog('Running %s tests' % suite.countTestCases()) + res = testRunner.run(suite) + num_failures = len(res.errors) + len(res.failures) + len(res.unexpectedSuccesses) + + total_run_time = time.perf_counter() - run_start_time + if hasattr(res, 'core_time'): + errlog('Total core time: %.3fs. Wallclock time: %.3fs. Parallelization: %.2fx.' % (res.core_time, total_run_time, res.core_time / total_run_time)) if options.bell: sys.stdout.write('\a') @@ -746,12 +742,12 @@ def prepend_default(arg): if os.path.exists(common.LAST_TEST): options.start_at = utils.read_file(common.LAST_TEST).strip() - suites, unmatched_tests = load_test_suites(tests, modules, options) + suite, unmatched_tests = load_test_suite(tests, modules, options) if unmatched_tests: errlog('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests)) return 1 - num_failures = run_tests(options, suites) + num_failures = run_tests(options, suite) # Return the number of failures as the process exit code # for automating success/failure reporting. Return codes # over 125 are not well supported on UNIX. 
diff --git a/test/test_sanity.py b/test/test_sanity.py index 310a40d4abd9d..a409feeb54e78 100644 --- a/test/test_sanity.py +++ b/test/test_sanity.py @@ -24,7 +24,7 @@ path_from_root, test_file, ) -from decorators import crossplatform, no_windows, parameterized, with_env_modify +from decorators import no_windows, parameterized, with_env_modify from tools import cache, ports, response_file, shared, utils from tools.config import EM_CONFIG @@ -182,7 +182,6 @@ def check_working(self, command, expected=None, env=None): return output # this should be the very first thing that runs. if this fails, everything else is irrelevant! - @crossplatform def test_aaa_normal(self): # Your existing EM_CONFIG should work! restore_and_set_up()