From e6a133fe26d958484dc9e3f8015a29f73fc5bd1e Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 13 Dec 2024 18:16:49 +0100 Subject: [PATCH] Auto-format Python code with ruff format --- .circleci/config.yml | 1 + .style.yapf | 3 - bootstrap.py | 22 +- docs/process.md | 5 +- em-config.py | 4 +- emar.py | 3 +- embuilder.py | 212 +- emcc.py | 244 +- emcmake.py | 12 +- emconfigure.py | 7 +- emmake.py | 7 +- emrun.py | 681 ++- emsize.py | 3 +- emstrip.py | 3 +- emsymbolizer.py | 50 +- pyproject.toml | 4 + requirements-dev.txt | 2 +- site/source/get_api_items.py | 132 +- site/source/get_wiki.py | 322 +- system/bin/sdl-config.py | 1 - system/bin/sdl2-config.py | 1 - test/benchmark/benchmark_sse.py | 372 +- test/clang_native.py | 38 +- test/common.py | 385 +- test/gen_large_switchcase.py | 14 +- test/gen_many_js_functions.py | 13 +- test/jsrun.py | 28 +- test/other/ports/external.py | 7 +- test/parallel_testsuite.py | 13 +- test/runner.py | 76 +- test/test_benchmark.py | 564 +- test/test_browser.py | 4250 ++++++++++----- test/test_core.py | 2836 +++++++---- test/test_interactive.py | 364 +- test/test_other.py | 7089 +++++++++++++++++--------- test/test_posixtest.py | 29 +- test/test_posixtest_browser.py | 1 + test/test_sanity.py | 101 +- test/test_sockets.py | 194 +- tools/building.py | 140 +- tools/cache.py | 19 +- tools/clean_webconsole.py | 15 +- tools/colored_logger.py | 90 +- tools/config.py | 14 +- tools/config_template.py | 6 +- tools/create_dom_pk_codes.py | 341 +- tools/determinism_checker.py | 5 +- tools/diagnostics.py | 42 +- tools/emcoverage.py | 2 +- tools/emdump.py | 340 +- tools/emdwp.py | 3 +- tools/emnm.py | 3 +- tools/emprofile.py | 11 +- tools/emscripten.py | 142 +- tools/experimental/reproduceriter.py | 24 +- tools/extract_metadata.py | 29 +- tools/feature_matrix.py | 9 +- tools/file_packager.py | 163 +- tools/install.py | 12 +- tools/js_manipulation.py | 16 +- tools/js_optimizer.py | 71 +- tools/line_endings.py | 49 +- tools/link.py | 484 +- tools/maint/add_license.py | 28 +- tools/maint/create_entry_points.py | 4 +- tools/maint/create_release.py | 7 +- tools/maint/gen_sig_info.py | 133 +- tools/maint/gen_struct_info.py | 110 +- tools/maint/simde_update.py | 8 +- tools/maybe_wasm2js.py | 3 +- tools/minimal_runtime_shell.py | 55 +- tools/ports/__init__.py | 25 +- tools/ports/boost_headers.py | 8 +- tools/ports/bullet.py | 5 +- tools/ports/bzip2.py | 9 +- tools/ports/cocos2d.py | 68 +- tools/ports/contrib/glfw3.py | 24 +- tools/ports/freetype.py | 91 +- tools/ports/giflib.py | 25 +- tools/ports/harfbuzz.py | 6 +- tools/ports/icu.py | 48 +- tools/ports/libjpeg.py | 19 +- tools/ports/libpng.py | 8 +- tools/ports/mpg123.py | 4 +- tools/ports/regal.py | 112 +- tools/ports/sdl2_image.py | 37 +- tools/ports/sdl2_mixer.py | 6 +- tools/ports/sdl2_ttf.py | 2 +- tools/ports/vorbis.py | 10 +- tools/response_file.py | 19 +- tools/settings.py | 189 +- tools/shared.py | 43 +- tools/system_libs.py | 735 +-- tools/tempfiles.py | 4 +- tools/toolchain_profiler.py | 95 +- tools/utils.py | 2 +- tools/wasm-sourcemap.py | 59 +- tools/webassembly.py | 66 +- tools/webidl_binder.py | 368 +- 99 files changed, 14836 insertions(+), 7722 deletions(-) delete mode 100644 .style.yapf diff --git a/.circleci/config.yml b/.circleci/config.yml index 5c5844c7e8c63..06971a7f18144 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -425,6 +425,7 @@ jobs: - checkout - pip-install - run: ruff check + - run: ruff format mypy: executor: bionic steps: diff --git a/.style.yapf b/.style.yapf deleted file 
mode 100644 index de72b98c2c862..0000000000000 --- a/.style.yapf +++ /dev/null @@ -1,3 +0,0 @@ -[style] -based_on_style = pep8 -indent_width = 2 diff --git a/bootstrap.py b/bootstrap.py index ff9d55dc129a2..bfd95fb7b5b3a 100755 --- a/bootstrap.py +++ b/bootstrap.py @@ -7,6 +7,7 @@ on the timestamps of various input files (kind of like a dumb version of a Makefile). """ + import argparse import os import shutil @@ -21,13 +22,16 @@ actions = [ ('npm packages', ['package.json'], [shutil.which('npm'), 'ci']), - ('create entry points', [ - 'tools/maint/create_entry_points.py', - 'tools/maint/run_python.bat', - 'tools/maint/run_python.sh', - 'tools/maint/run_python.ps1', - ], - [sys.executable, 'tools/maint/create_entry_points.py']), + ( + 'create entry points', + [ + 'tools/maint/create_entry_points.py', + 'tools/maint/run_python.bat', + 'tools/maint/run_python.sh', + 'tools/maint/run_python.ps1', + ], + [sys.executable, 'tools/maint/create_entry_points.py'], + ), ('git submodules', ['test/third_party/posixtestsuite/'], [shutil.which('git'), 'submodule', 'update', '--init']), ] @@ -57,7 +61,9 @@ def main(args): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-v', '--verbose', action='store_true', help='verbose', default=False) parser.add_argument('-n', '--dry-run', action='store_true', help='dry run', default=False) - parser.add_argument('-i', '--install-post-checkout', action='store_true', help='install post checkout script', default=False) + parser.add_argument( + '-i', '--install-post-checkout', action='store_true', help='install post checkout script', default=False + ) args = parser.parse_args() if args.install_post_checkout: diff --git a/docs/process.md b/docs/process.md index f1bf04a714a11..5a5a7f2ff8243 100644 --- a/docs/process.md +++ b/docs/process.md @@ -55,8 +55,9 @@ pre-processor. See [`.clang-format`][clang-format] for more details. ### Python Code We generally follow the pep8 standard with the major exception that we use 2 -spaces for indentation. `ruff` is run on all PRs to ensure that Python code -conforms to this style. See [`pyproject.toml`][pyproject.toml] for more details. +spaces for indentation. `ruff check` and `ruff format` are run on all PRs to +ensure that Python code conforms to this style. See +[`pyproject.toml`][pyproject.toml] for more details. #### Static Type Checking diff --git a/em-config.py b/em-config.py index bcbd9abe1cf8d..73e4e009bfd47 100755 --- a/em-config.py +++ b/em-config.py @@ -20,9 +20,7 @@ def main(): - if len(sys.argv) != 2 or \ - not re.match(r"^[\w\W_][\w\W_\d]*$", sys.argv[1]) or \ - not hasattr(config, sys.argv[1]): + if len(sys.argv) != 2 or not re.match(r"^[\w\W_][\w\W_\d]*$", sys.argv[1]) or not hasattr(config, sys.argv[1]): print('Usage: em-config VAR_NAME', file=sys.stderr) sys.exit(1) diff --git a/emar.py b/emar.py index 061fc5d4ce927..88625b01407c1 100755 --- a/emar.py +++ b/emar.py @@ -4,8 +4,7 @@ # University of Illinois/NCSA Open Source License. Both these licenses can be # found in the LICENSE file. -"""Wrapper script around `llvm-ar`. 
-""" +"""Wrapper script around `llvm-ar`.""" import sys from tools import shared diff --git a/embuilder.py b/embuilder.py index 4b706c1d6df76..e5b426c05a551 100755 --- a/embuilder.py +++ b/embuilder.py @@ -30,96 +30,96 @@ # Minimal subset of targets used by CI systems to build enough to be useful MINIMAL_TASKS = [ - 'libbulkmemory', - 'libcompiler_rt', - 'libcompiler_rt-wasm-sjlj', - 'libcompiler_rt-ww', - 'libc', - 'libc-debug', - 'libc-ww-debug', - 'libc_optz', - 'libc_optz-debug', - 'libc++abi', - 'libc++abi-except', - 'libc++abi-noexcept', - 'libc++abi-debug', - 'libc++abi-debug-except', - 'libc++abi-debug-noexcept', - 'libc++abi-debug-ww-noexcept', - 'libc++', - 'libc++-except', - 'libc++-noexcept', - 'libc++-ww-noexcept', - 'libal', - 'libdlmalloc', - 'libdlmalloc-tracing', - 'libdlmalloc-debug', - 'libdlmalloc-ww', - 'libembind', - 'libembind-rtti', - 'libemmalloc', - 'libemmalloc-debug', - 'libemmalloc-memvalidate', - 'libemmalloc-verbose', - 'libemmalloc-memvalidate-verbose', - 'libmimalloc', - 'libmimalloc-mt', - 'libGL', - 'libGL-getprocaddr', - 'libGL-emu-getprocaddr', - 'libGL-emu-webgl2-ofb-getprocaddr', - 'libGL-webgl2-ofb-getprocaddr', - 'libGL-ww-getprocaddr', - 'libhtml5', - 'libsockets', - 'libsockets-ww', - 'libstubs', - 'libstubs-debug', - 'libstandalonewasm-nocatch', - 'crt1', - 'crt1_proxy_main', - 'crtbegin', - 'libunwind-except', - 'libnoexit', - 'sqlite3', - 'sqlite3-mt', - 'libwebgpu', - 'libwebgpu_cpp', + 'libbulkmemory', + 'libcompiler_rt', + 'libcompiler_rt-wasm-sjlj', + 'libcompiler_rt-ww', + 'libc', + 'libc-debug', + 'libc-ww-debug', + 'libc_optz', + 'libc_optz-debug', + 'libc++abi', + 'libc++abi-except', + 'libc++abi-noexcept', + 'libc++abi-debug', + 'libc++abi-debug-except', + 'libc++abi-debug-noexcept', + 'libc++abi-debug-ww-noexcept', + 'libc++', + 'libc++-except', + 'libc++-noexcept', + 'libc++-ww-noexcept', + 'libal', + 'libdlmalloc', + 'libdlmalloc-tracing', + 'libdlmalloc-debug', + 'libdlmalloc-ww', + 'libembind', + 'libembind-rtti', + 'libemmalloc', + 'libemmalloc-debug', + 'libemmalloc-memvalidate', + 'libemmalloc-verbose', + 'libemmalloc-memvalidate-verbose', + 'libmimalloc', + 'libmimalloc-mt', + 'libGL', + 'libGL-getprocaddr', + 'libGL-emu-getprocaddr', + 'libGL-emu-webgl2-ofb-getprocaddr', + 'libGL-webgl2-ofb-getprocaddr', + 'libGL-ww-getprocaddr', + 'libhtml5', + 'libsockets', + 'libsockets-ww', + 'libstubs', + 'libstubs-debug', + 'libstandalonewasm-nocatch', + 'crt1', + 'crt1_proxy_main', + 'crtbegin', + 'libunwind-except', + 'libnoexit', + 'sqlite3', + 'sqlite3-mt', + 'libwebgpu', + 'libwebgpu_cpp', ] # Additional tasks on top of MINIMAL_TASKS that are necessary for PIC testing on # CI (which has slightly more tests than other modes that want to use MINIMAL) MINIMAL_PIC_TASKS = MINIMAL_TASKS + [ - 'libcompiler_rt-mt', - 'libc-mt', - 'libc-mt-debug', - 'libc_optz-mt', - 'libc_optz-mt-debug', - 'libc++abi-mt', - 'libc++abi-mt-noexcept', - 'libc++abi-debug-mt', - 'libc++abi-debug-mt-noexcept', - 'libc++-mt', - 'libc++-mt-noexcept', - 'libdlmalloc-mt', - 'libGL-emu', - 'libGL-emu-webgl2-getprocaddr', - 'libGL-mt-getprocaddr', - 'libGL-mt-emu', - 'libGL-mt-emu-webgl2-getprocaddr', - 'libGL-mt-emu-webgl2-ofb-getprocaddr', - 'libsockets_proxy', - 'libsockets-mt', - 'crtbegin', - 'libsanitizer_common_rt', - 'libubsan_rt', - 'libwasm_workers-debug-stub', - 'libfetch', - 'libfetch-mt', - 'libwasmfs', - 'libwasmfs-debug', - 'libwasmfs_no_fs', - 'giflib', + 'libcompiler_rt-mt', + 'libc-mt', + 'libc-mt-debug', + 'libc_optz-mt', + 'libc_optz-mt-debug', + 
'libc++abi-mt', + 'libc++abi-mt-noexcept', + 'libc++abi-debug-mt', + 'libc++abi-debug-mt-noexcept', + 'libc++-mt', + 'libc++-mt-noexcept', + 'libdlmalloc-mt', + 'libGL-emu', + 'libGL-emu-webgl2-getprocaddr', + 'libGL-mt-getprocaddr', + 'libGL-mt-emu', + 'libGL-mt-emu-webgl2-getprocaddr', + 'libGL-mt-emu-webgl2-ofb-getprocaddr', + 'libsockets_proxy', + 'libsockets-mt', + 'crtbegin', + 'libsanitizer_common_rt', + 'libubsan_rt', + 'libwasm_workers-debug-stub', + 'libfetch', + 'libfetch-mt', + 'libwasmfs', + 'libwasmfs-debug', + 'libwasmfs_no_fs', + 'giflib', ] PORTS = sorted(list(ports.ports_by_name.keys()) + list(ports.port_variants.keys())) @@ -187,19 +187,17 @@ def handle_port_error(target, message): def main(): all_build_start_time = time.time() - parser = argparse.ArgumentParser(description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=get_help()) + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=get_help() + ) parser.add_argument('--lto', action='store_const', const='full', help='build bitcode object for LTO') - parser.add_argument('--lto=thin', dest='lto', action='store_const', const='thin', help='build bitcode object for ThinLTO') - parser.add_argument('--pic', action='store_true', - help='build relocatable objects for suitable for dynamic linking') - parser.add_argument('--force', action='store_true', - help='force rebuild of target (by removing it first)') - parser.add_argument('--verbose', action='store_true', - help='show build commands') - parser.add_argument('--wasm64', action='store_true', - help='use wasm64 architecture') + parser.add_argument( + '--lto=thin', dest='lto', action='store_const', const='thin', help='build bitcode object for ThinLTO' + ) + parser.add_argument('--pic', action='store_true', help='build relocatable objects for suitable for dynamic linking') + parser.add_argument('--force', action='store_true', help='force rebuild of target (by removing it first)') + parser.add_argument('--verbose', action='store_true', help='show build commands') + parser.add_argument('--wasm64', action='store_true', help='use wasm64 architecture') parser.add_argument('operation', choices=['build', 'clear', 'rebuild']) parser.add_argument('targets', nargs='*', help='see below') args = parser.parse_args() @@ -240,7 +238,7 @@ def main(): # process tasks auto_tasks = False - task_targets = dict.fromkeys(args.targets) # use dict to keep targets order + task_targets = dict.fromkeys(args.targets) # use dict to keep targets order # substitute predefined_tasks = { @@ -315,14 +313,28 @@ def main(): return 1 time_taken = time.time() - start_time - logger.info('...success. Took %s(%.2fs)' % (('%02d:%02d mins ' % (time_taken // 60, time_taken % 60) if time_taken >= 60 else ''), time_taken)) + logger.info( + '...success. 
Took %s(%.2fs)' + % (('%02d:%02d mins ' % (time_taken // 60, time_taken % 60) if time_taken >= 60 else ''), time_taken) + ) if USE_NINJA and args.operation != 'clear': system_libs.build_deferred() if len(tasks) > 1 or USE_NINJA: all_build_time_taken = time.time() - all_build_start_time - logger.info('Built %d targets in %s(%.2fs)' % (len(tasks), ('%02d:%02d mins ' % (all_build_time_taken // 60, all_build_time_taken % 60) if all_build_time_taken >= 60 else ''), all_build_time_taken)) + logger.info( + 'Built %d targets in %s(%.2fs)' + % ( + len(tasks), + ( + '%02d:%02d mins ' % (all_build_time_taken // 60, all_build_time_taken % 60) + if all_build_time_taken >= 60 + else '' + ), + all_build_time_taken, + ) + ) return 0 diff --git a/emcc.py b/emcc.py index 2925985f22f6c..ff1ff4662bd7e 100644 --- a/emcc.py +++ b/emcc.py @@ -56,6 +56,7 @@ # run already. if os.path.exists(utils.path_from_root('.git')) and os.path.exists(utils.path_from_root('bootstrap.py')): import bootstrap + bootstrap.check() # endings = dot + a suffix, compare against result of shared.suffix() @@ -65,7 +66,7 @@ PREPROCESSED_ENDINGS = ['.i', '.ii'] OBJCXX_ENDINGS = ['.mm', '.mii'] SPECIAL_ENDINGLESS_FILENAMES = [os.devnull] -C_ENDINGS += SPECIAL_ENDINGLESS_FILENAMES # consider the special endingless filenames like /dev/null to be C +C_ENDINGS += SPECIAL_ENDINGLESS_FILENAMES # consider the special endingless filenames like /dev/null to be C SOURCE_ENDINGS = C_ENDINGS + CXX_ENDINGS + OBJC_ENDINGS + OBJCXX_ENDINGS + ['.bc', '.ll', '.S'] ASSEMBLY_ENDINGS = ['.s'] @@ -73,20 +74,34 @@ # These symbol names are allowed in INCOMING_MODULE_JS_API but are not part of the # default set. -EXTRA_INCOMING_JS_API = [ - 'fetchSettings' -] +EXTRA_INCOMING_JS_API = ['fetchSettings'] SIMD_INTEL_FEATURE_TOWER = ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-msse4', '-mavx'] SIMD_NEON_FLAGS = ['-mfpu=neon'] LINK_ONLY_FLAGS = { - '--bind', '--closure', '--cpuprofiler', '--embed-file', - '--emit-symbol-map', '--emrun', '--exclude-file', '--extern-post-js', - '--extern-pre-js', '--ignore-dynamic-linking', '--js-library', - '--js-transform', '--oformat', '--output_eol', - '--post-js', '--pre-js', '--preload-file', '--profiling-funcs', - '--proxy-to-worker', '--shell-file', '--source-map-base', - '--threadprofiler', '--use-preload-plugins' + '--bind', + '--closure', + '--cpuprofiler', + '--embed-file', + '--emit-symbol-map', + '--emrun', + '--exclude-file', + '--extern-post-js', + '--extern-pre-js', + '--ignore-dynamic-linking', + '--js-library', + '--js-transform', + '--oformat', + '--output_eol', + '--post-js', + '--pre-js', + '--preload-file', + '--profiling-funcs', + '--proxy-to-worker', + '--shell-file', + '--source-map-base', + '--threadprofiler', + '--use-preload-plugins', } @@ -144,10 +159,10 @@ def __init__(self): self.use_closure_compiler = None self.closure_args = [] self.js_transform = None - self.pre_js = [] # before all js - self.post_js = [] # after all js - self.extern_pre_js = [] # before all js, external to optimized code - self.extern_post_js = [] # after all js, external to optimized code + self.pre_js = [] # before all js + self.post_js = [] # after all js + self.extern_pre_js = [] # before all js, external to optimized code + self.extern_post_js = [] # after all js, external to optimized code self.preload_files = [] self.embed_files = [] self.exclude_files = [] @@ -218,13 +233,36 @@ def make_relative(filename): if ignore: continue - if arg in ('-MT', '-MF', '-MJ', '-MQ', '-D', '-U', '-o', '-x', - 
'-Xpreprocessor', '-include', '-imacros', '-idirafter', - '-iprefix', '-iwithprefix', '-iwithprefixbefore', - '-isysroot', '-imultilib', '-A', '-isystem', '-iquote', - '-install_name', '-compatibility_version', - '-current_version', '-I', '-L', '-include-pch', - '-Xlinker', '-Xclang'): + if arg in ( + '-MT', + '-MF', + '-MJ', + '-MQ', + '-D', + '-U', + '-o', + '-x', + '-Xpreprocessor', + '-include', + '-imacros', + '-idirafter', + '-iprefix', + '-iwithprefix', + '-iwithprefixbefore', + '-isysroot', + '-imultilib', + '-A', + '-isystem', + '-iquote', + '-install_name', + '-compatibility_version', + '-current_version', + '-I', + '-L', + '-include-pch', + '-Xlinker', + '-Xclang', + ): ignore_next = True if arg == '-o': @@ -244,7 +282,7 @@ def expand_byte_size_suffixes(value): value, suffix = match.groups() value = int(value) if suffix: - size_suffixes = {suffix: 1024 ** i for i, suffix in enumerate(['b', 'kb', 'mb', 'gb', 'tb'])} + size_suffixes = {suffix: 1024**i for i, suffix in enumerate(['b', 'kb', 'mb', 'gb', 'tb'])} value *= size_suffixes[suffix.lower()] return value @@ -472,7 +510,11 @@ def array_contains_any_of(hay, needles): if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER) or array_contains_any_of(user_args, SIMD_NEON_FLAGS): if '-msimd128' not in user_args and '-mrelaxed-simd' not in user_args: - exit_with_error('passing any of ' + ', '.join(SIMD_INTEL_FEATURE_TOWER + SIMD_NEON_FLAGS) + ' flags also requires passing -msimd128 (or -mrelaxed-simd)!') + exit_with_error( + 'passing any of ' + + ', '.join(SIMD_INTEL_FEATURE_TOWER + SIMD_NEON_FLAGS) + + ' flags also requires passing -msimd128 (or -mrelaxed-simd)!' + ) cflags += ['-D__SSE__=1'] if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[1:]): @@ -588,7 +630,7 @@ def run(args): ''') return 0 - if '-dumpversion' in args: # gcc's doc states "Print the compiler version [...] and don't do anything else." + if '-dumpversion' in args: # gcc's doc states "Print the compiler version [...] and don't do anything else." print(utils.EMSCRIPTEN_VERSION) return 0 @@ -609,16 +651,22 @@ def run(args): if not lines: exit_with_error(f'unable to parse output of `{cmd}`:\n{proc.stderr}') parts = shlex.split(lines[0].replace('\\', '\\\\')) - parts = [x for x in parts if x not in ['-c', '-o', '-v', '-emit-llvm'] and input_file not in x and temp_target not in x] + parts = [ + x for x in parts if x not in ['-c', '-o', '-v', '-emit-llvm'] and input_file not in x and temp_target not in x + ] print(shared.shlex_join(parts[1:])) return 0 if 'EMMAKEN_NO_SDK' in os.environ: - exit_with_error('EMMAKEN_NO_SDK is no longer supported. The standard -nostdlib and -nostdinc flags should be used instead') + exit_with_error( + 'EMMAKEN_NO_SDK is no longer supported. 
The standard -nostdlib and -nostdinc flags should be used instead' + ) if 'EMMAKEN_COMPILER' in os.environ: - exit_with_error('`EMMAKEN_COMPILER` is no longer supported.\n' + - 'Please use the `LLVM_ROOT` and/or `COMPILER_WRAPPER` config settings instead') + exit_with_error( + '`EMMAKEN_COMPILER` is no longer supported.\n' + + 'Please use the `LLVM_ROOT` and/or `COMPILER_WRAPPER` config settings instead' + ) if 'EMMAKEN_CFLAGS' in os.environ: exit_with_error('`EMMAKEN_CFLAGS` is no longer supported, please use `EMCC_CFLAGS` instead') @@ -670,6 +718,7 @@ def run(args): exit_with_error('--post-link requires a single input file') # Delay import of link.py to avoid processing this file when only compiling from tools import link + link.run_post_link(input_files[0][1], options, state, newargs) return 0 @@ -679,6 +728,7 @@ def run(args): if state.mode == Mode.COMPILE_AND_LINK: # Delay import of link.py to avoid processing this file when only compiling from tools import link + return link.run(linker_inputs, options, state, newargs) else: logger.debug('stopping after compile phase') @@ -749,8 +799,7 @@ def phase_parse_arguments(state): @ToolchainProfiler.profile_block('setup') def phase_setup(options, state, newargs): - """Second phase: configure and setup the compiler based on the specified settings and arguments. - """ + """Second phase: configure and setup the compiler based on the specified settings and arguments.""" if settings.RUNTIME_LINKED_LIBS: newargs += settings.RUNTIME_LINKED_LIBS @@ -775,14 +824,39 @@ def phase_setup(options, state, newargs): continue arg = newargs[i] - if arg in {'-MT', '-MF', '-MJ', '-MQ', '-D', '-U', '-o', '-x', - '-Xpreprocessor', '-include', '-imacros', '-idirafter', - '-iprefix', '-iwithprefix', '-iwithprefixbefore', - '-isysroot', '-imultilib', '-A', '-isystem', '-iquote', - '-install_name', '-compatibility_version', - '-current_version', '-I', '-L', '-include-pch', - '-undefined', '-target', - '-Xlinker', '-Xclang', '-z'}: + if arg in { + '-MT', + '-MF', + '-MJ', + '-MQ', + '-D', + '-U', + '-o', + '-x', + '-Xpreprocessor', + '-include', + '-imacros', + '-idirafter', + '-iprefix', + '-iwithprefix', + '-iwithprefixbefore', + '-isysroot', + '-imultilib', + '-A', + '-isystem', + '-iquote', + '-install_name', + '-compatibility_version', + '-current_version', + '-I', + '-L', + '-include-pch', + '-undefined', + '-target', + '-Xlinker', + '-Xclang', + '-z', + }: skip = True if not arg.startswith('-'): @@ -792,7 +866,11 @@ def phase_setup(options, state, newargs): # python before 3.8: # https://bugs.python.org/issue1311 if not os.path.exists(arg) and arg != os.devnull: - exit_with_error('%s: No such file or directory ("%s" was expected to be an input file, based on the commandline arguments provided)', arg, arg) + exit_with_error( + '%s: No such file or directory ("%s" was expected to be an input file, based on the commandline arguments provided)', + arg, + arg, + ) file_suffix = get_file_suffix(arg) if file_suffix in HEADER_ENDINGS: has_header_inputs = True @@ -843,14 +921,10 @@ def phase_setup(options, state, newargs): if state.mode in (Mode.COMPILE_ONLY, Mode.PREPROCESS_ONLY): for key in user_settings: if key not in COMPILE_TIME_SETTINGS: - diagnostics.warning( - 'unused-command-line-argument', - "linker setting ignored during compilation: '%s'" % key) + diagnostics.warning('unused-command-line-argument', "linker setting ignored during compilation: '%s'" % key) for arg in state.orig_args: if arg in LINK_ONLY_FLAGS: - diagnostics.warning( - 
'unused-command-line-argument', - "linker flag ignored during compilation: '%s'" % arg) + diagnostics.warning('unused-command-line-argument', "linker flag ignored during compilation: '%s'" % arg) if settings.MAIN_MODULE or settings.SIDE_MODULE: settings.RELOCATABLE = 1 @@ -870,7 +944,9 @@ def phase_setup(options, state, newargs): # on the command line. This is no longer valid so report either an error or a warning (for # backwards compat with the old `DISABLE_EXCEPTION_CATCHING=2` if user_settings['DISABLE_EXCEPTION_CATCHING'] in ('0', '2'): - diagnostics.warning('deprecated', 'DISABLE_EXCEPTION_CATCHING=X is no longer needed when specifying EXCEPTION_CATCHING_ALLOWED') + diagnostics.warning( + 'deprecated', 'DISABLE_EXCEPTION_CATCHING=X is no longer needed when specifying EXCEPTION_CATCHING_ALLOWED' + ) else: exit_with_error('DISABLE_EXCEPTION_CATCHING and EXCEPTION_CATCHING_ALLOWED are mutually exclusive') @@ -885,18 +961,26 @@ def phase_setup(options, state, newargs): # -fwasm-exceptions takes care of enabling them, so users aren't supposed to # pass them explicitly, regardless of their values if 'DISABLE_EXCEPTION_CATCHING' in user_settings or 'DISABLE_EXCEPTION_THROWING' in user_settings: - diagnostics.warning('emcc', 'you no longer need to pass DISABLE_EXCEPTION_CATCHING or DISABLE_EXCEPTION_THROWING when using Wasm exceptions') + diagnostics.warning( + 'emcc', + 'you no longer need to pass DISABLE_EXCEPTION_CATCHING or DISABLE_EXCEPTION_THROWING when using Wasm exceptions', + ) settings.DISABLE_EXCEPTION_CATCHING = 1 settings.DISABLE_EXCEPTION_THROWING = 1 if user_settings.get('ASYNCIFY') == '1': - diagnostics.warning('emcc', 'ASYNCIFY=1 is not compatible with -fwasm-exceptions. Parts of the program that mix ASYNCIFY and exceptions will not compile.') + diagnostics.warning( + 'emcc', + 'ASYNCIFY=1 is not compatible with -fwasm-exceptions. Parts of the program that mix ASYNCIFY and exceptions will not compile.', + ) if user_settings.get('SUPPORT_LONGJMP') == 'emscripten': exit_with_error('SUPPORT_LONGJMP=emscripten is not compatible with -fwasm-exceptions') if settings.DISABLE_EXCEPTION_THROWING and not settings.DISABLE_EXCEPTION_CATCHING: - exit_with_error("DISABLE_EXCEPTION_THROWING was set (probably from -fno-exceptions) but is not compatible with enabling exception catching (DISABLE_EXCEPTION_CATCHING=0). If you don't want exceptions, set DISABLE_EXCEPTION_CATCHING to 1; if you do want exceptions, don't link with -fno-exceptions") + exit_with_error( + "DISABLE_EXCEPTION_THROWING was set (probably from -fno-exceptions) but is not compatible with enabling exception catching (DISABLE_EXCEPTION_CATCHING=0). If you don't want exceptions, set DISABLE_EXCEPTION_CATCHING to 1; if you do want exceptions, don't link with -fno-exceptions" + ) if options.target.startswith('wasm64'): default_setting('MEMORY64', 1) @@ -1042,7 +1126,10 @@ def get_clang_command_asm(): if options.output_file: cmd += ['-o', options.output_file] if get_file_suffix(options.output_file) == '.bc' and not settings.LTO and '-emit-llvm' not in state.orig_args: - diagnostics.warning('emcc', '.bc output file suffix used without -flto or -emit-llvm. Consider using .o extension since emcc will output an object file, not a bitcode file') + diagnostics.warning( + 'emcc', + '.bc output file suffix used without -flto or -emit-llvm. 
Consider using .o extension since emcc will output an object file, not a bitcode file', + ) shared.exec_process(cmd) assert False, 'exec_process does not return' @@ -1119,8 +1206,8 @@ def version_string(): revision_suffix = '' if os.path.exists(utils.path_from_root('.git')): git_rev = run_process( - ['git', 'rev-parse', 'HEAD'], - stdout=PIPE, stderr=PIPE, cwd=utils.path_from_root()).stdout.strip() + ['git', 'rev-parse', 'HEAD'], stdout=PIPE, stderr=PIPE, cwd=utils.path_from_root() + ).stdout.strip() revision_suffix = ' (%s)' % git_rev elif os.path.exists(utils.path_from_root('emscripten-revision.txt')): rev = read_file(utils.path_from_root('emscripten-revision.txt')).strip() @@ -1331,7 +1418,9 @@ def consume_arg_file(): elif check_flag('--use-preload-cache'): options.use_preload_cache = True elif check_flag('--no-heap-copy'): - diagnostics.warning('legacy-settings', 'ignoring legacy flag --no-heap-copy (that is the only mode supported now)') + diagnostics.warning( + 'legacy-settings', 'ignoring legacy flag --no-heap-copy (that is the only mode supported now)' + ) elif check_flag('--use-preload-plugins'): options.use_preload_plugins = True elif check_flag('--ignore-dynamic-linking'): @@ -1354,7 +1443,10 @@ def consume_arg_file(): elif check_arg('--js-library'): settings.JS_LIBRARIES.append((i + 1, os.path.abspath(consume_arg_file()))) elif check_flag('--remove-duplicates'): - diagnostics.warning('legacy-settings', '--remove-duplicates is deprecated as it is no longer needed. If you cannot link without it, file a bug with a testcase') + diagnostics.warning( + 'legacy-settings', + '--remove-duplicates is deprecated as it is no longer needed. If you cannot link without it, file a bug with a testcase', + ) elif check_flag('--jcache'): logger.error('jcache is no longer supported') elif check_arg('--cache'): @@ -1366,13 +1458,13 @@ def consume_arg_file(): elif check_flag('--clear-cache'): logger.info('clearing cache as requested by --clear-cache: `%s`', cache.cachedir) cache.erase() - shared.perform_sanity_checks() # this is a good time for a sanity check + shared.perform_sanity_checks() # this is a good time for a sanity check should_exit = True elif check_flag('--clear-ports'): logger.info('clearing ports and cache as requested by --clear-ports') ports.clear() cache.erase() - shared.perform_sanity_checks() # this is a good time for a sanity check + shared.perform_sanity_checks() # this is a good time for a sanity check should_exit = True elif check_flag('--check'): print(version_string(), file=sys.stderr) @@ -1397,10 +1489,12 @@ def consume_arg_file(): # that are e.g. x86 specific and non-portable. The emscripten bundled # headers are modified to be portable, local system ones are generally not. diagnostics.warning( - 'absolute-paths', f'-I or -L of an absolute path "{arg}" ' - 'encountered. If this is to a local system header/library, it may ' - 'cause problems (local system files make sense for compiling natively ' - 'on your system, but not necessarily to JavaScript).') + 'absolute-paths', + f'-I or -L of an absolute path "{arg}" ' + 'encountered. 
If this is to a local system header/library, it may ' + 'cause problems (local system files make sense for compiling natively ' + 'on your system, but not necessarily to JavaScript).', + ) elif check_flag('--emrun'): options.emrun = True elif check_flag('--cpuprofiler'): @@ -1413,16 +1507,12 @@ def consume_arg_file(): settings.WASM_EXCEPTIONS = 0 elif arg == '-mbulk-memory': settings.BULK_MEMORY = 1 - feature_matrix.enable_feature(feature_matrix.Feature.BULK_MEMORY, - '-mbulk-memory', - override=True) + feature_matrix.enable_feature(feature_matrix.Feature.BULK_MEMORY, '-mbulk-memory', override=True) elif arg == '-mno-bulk-memory': settings.BULK_MEMORY = 0 feature_matrix.disable_feature(feature_matrix.Feature.BULK_MEMORY) elif arg == '-msign-ext': - feature_matrix.enable_feature(feature_matrix.Feature.SIGN_EXT, - '-msign-ext', - override=True) + feature_matrix.enable_feature(feature_matrix.Feature.SIGN_EXT, '-msign-ext', override=True) elif arg == '-mno-sign-ext': feature_matrix.disable_feature(feature_matrix.Feature.SIGN_EXT) elif arg == '-fexceptions': @@ -1472,7 +1562,9 @@ def consume_arg_file(): else: value = '1' if key in settings.keys(): - exit_with_error(f'{arg}: cannot change built-in settings values with a -jsD directive. Pass -s{key}={value} instead!') + exit_with_error( + f'{arg}: cannot change built-in settings values with a -jsD directive. Pass -s{key}={value} instead!' + ) user_js_defines += [(key, value)] newargs[i] = '' elif check_flag('-shared'): @@ -1487,7 +1579,9 @@ def consume_arg_file(): elif check_arg('-target') or check_arg('--target'): options.target = consume_arg() if options.target not in ('wasm32', 'wasm64', 'wasm64-unknown-emscripten', 'wasm32-unknown-emscripten'): - exit_with_error(f'unsupported target: {options.target} (emcc only supports wasm64-unknown-emscripten and wasm32-unknown-emscripten)') + exit_with_error( + f'unsupported target: {options.target} (emcc only supports wasm64-unknown-emscripten and wasm32-unknown-emscripten)' + ) elif check_arg('--use-port'): ports.handle_use_port_arg(settings, consume_arg()) elif arg == '-mllvm': @@ -1541,7 +1635,9 @@ def parse_string_value(text): if first == "'" or first == '"': text = text.rstrip() if text[-1] != text[0] or len(text) < 2: - raise ValueError(f'unclosed quoted string. expected final character to be "{text[0]}" and length to be greater than 1 in "{text[0]}"') + raise ValueError( + f'unclosed quoted string. expected final character to be "{text[0]}" and length to be greater than 1 in "{text[0]}"' + ) return text[1:-1] return text @@ -1551,7 +1647,7 @@ def parse_string_list_members(text): result = [] index = 0 while True: - current = values[index].lstrip() # Cannot safely rstrip for cases like: "HERE-> ," + current = values[index].lstrip() # Cannot safely rstrip for cases like: "HERE-> ," if not len(current): raise ValueError('empty value in string list') first = current[0] @@ -1559,7 +1655,7 @@ def parse_string_list_members(text): result.append(current.rstrip()) else: start = index - while True: # Continue until closing quote found + while True: # Continue until closing quote found if index >= len(values): raise ValueError(f"unclosed quoted string. 
expected final character to be '{first}' in '{values[start]}'") new = values[index].rstrip() @@ -1629,7 +1725,9 @@ def validate_arg_level(level_string, max_level, err_msg, clamp=False): exit_with_error(err_msg) if clamp: if level > max_level: - logger.warning("optimization level '-O" + level_string + "' is not supported; using '-O" + str(max_level) + "' instead") + logger.warning( + "optimization level '-O" + level_string + "' is not supported; using '-O" + str(max_level) + "' instead" + ) level = max_level if not 0 <= level <= max_level: exit_with_error(err_msg) diff --git a/emcmake.py b/emcmake.py index a9dc2294b91c2..84b6c57c424db 100755 --- a/emcmake.py +++ b/emcmake.py @@ -18,12 +18,15 @@ # def run(): if len(sys.argv) < 2 or sys.argv[1] in ('--version', '--help'): - print('''\ + print( + '''\ emcmake is a helper for cmake, setting various environment variables so that emcc etc. are used. Typical usage: emcmake cmake [FLAGS] -''', file=sys.stderr) +''', + file=sys.stderr, + ) return 1 args = sys.argv[1:] @@ -53,7 +56,10 @@ def has_substr(args, substr): elif shutil.which('ninja'): args += ['-G', 'Ninja'] else: - print('emcmake: no compatible cmake generator found; Please install ninja or mingw32-make, or specify a generator explicitly using -G', file=sys.stderr) + print( + 'emcmake: no compatible cmake generator found; Please install ninja or mingw32-make, or specify a generator explicitly using -G', + file=sys.stderr, + ) return 1 print('configure: ' + shared.shlex_join(args), file=sys.stderr) diff --git a/emconfigure.py b/emconfigure.py index deba798069bb3..82d096983c9cf 100755 --- a/emconfigure.py +++ b/emconfigure.py @@ -27,13 +27,16 @@ # def run(): if len(sys.argv) < 2 or sys.argv[1] in ('--version', '--help'): - print('''\ + print( + '''\ emconfigure is a helper for configure, setting various environment variables so that emcc etc. are used. Typical usage: emconfigure ./configure [FLAGS] -(but you can run any command instead of configure)''', file=sys.stderr) +(but you can run any command instead of configure)''', + file=sys.stderr, + ) return 1 args = sys.argv[1:] diff --git a/emmake.py b/emmake.py index 426e5cc4cd63e..62cb5a2f4321d 100755 --- a/emmake.py +++ b/emmake.py @@ -34,13 +34,16 @@ # def run(): if len(sys.argv) < 2 or sys.argv[1] in ('--version', '--help'): - print('''\ + print( + '''\ emmake is a helper for make, setting various environment variables so that emcc etc. are used. Typical usage: emmake make [FLAGS] -(but you can run any command instead of make)''', file=sys.stderr) +(but you can run any command instead of make)''', + file=sys.stderr, + ) return 1 args = sys.argv[1:] diff --git a/emrun.py b/emrun.py index 0d33218c2d747..1c1c8e6606ce1 100644 --- a/emrun.py +++ b/emrun.py @@ -136,8 +136,7 @@ def tick(): def logi(msg): - """Prints a log message to 'info' stdout channel. Always printed. - """ + """Prints a log message to 'info' stdout channel. Always printed.""" global last_message_time with http_mutex: sys.stdout.write(msg + '\n') @@ -158,8 +157,7 @@ def logv(msg): def loge(msg): - """Prints an error message to stderr channel. - """ + """Prints an error message to stderr channel.""" global last_message_time with http_mutex: sys.stderr.write(msg + '\n') @@ -174,8 +172,7 @@ def format_eol(msg): def browser_logi(msg): - """Prints a message to the browser stdout output stream. 
- """ + """Prints a message to the browser stdout output stream.""" global last_message_time msg = format_eol(msg) browser_stdout_handle.write(msg + '\n') @@ -184,8 +181,7 @@ def browser_logi(msg): def browser_loge(msg): - """Prints a message to the browser stderr output stream. - """ + """Prints a message to the browser stderr output stream.""" global last_message_time msg = format_eol(msg) browser_stderr_handle.write(msg + '\n') @@ -330,13 +326,16 @@ def is_browser_process_alive(): if current_browser_processes: try: import psutil + for p in current_browser_processes: if psutil.pid_exists(p['pid']): return True return False except Exception: # Fail gracefully if psutil not available - logv('psutil is not available, emrun may not be able to accurately track whether the browser process is alive or not') + logv( + 'psutil is not available, emrun may not be able to accurately track whether the browser process is alive or not' + ) # We do not have a track of the browser process ID that we spawned. # Make an assumption that the browser process is open as long until @@ -381,8 +380,12 @@ def kill_browser_process(): else: logv("Terminating all processes that have string '" + processname_killed_atexit + "' in their name.") if WINDOWS: - process_image = processname_killed_atexit if '.exe' in processname_killed_atexit else (processname_killed_atexit + '.exe') - process = subprocess.Popen(['taskkill', '/F', '/IM', process_image, '/T'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process_image = ( + processname_killed_atexit if '.exe' in processname_killed_atexit else (processname_killed_atexit + '.exe') + ) + process = subprocess.Popen( + ['taskkill', '/F', '/IM', process_image, '/T'], stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) process.communicate() else: try: @@ -391,7 +394,9 @@ def kill_browser_process(): try: subprocess.call(['killall', processname_killed_atexit]) except OSError: - loge('Both commands pkill and killall failed to clean up the spawned browser process. Perhaps neither of these utilities is available on your system?') + loge( + 'Both commands pkill and killall failed to clean up the spawned browser process. Perhaps neither of these utilities is available on your system?' + ) delete_emrun_safe_firefox_profile() # Clear the process name to represent that the browser is now dead. processname_killed_atexit = '' @@ -408,7 +413,7 @@ def kill_browser_process(): # process that immediately exits. def detect_browser_processes(): if not browser_exe: - return # Running with --no-browser, we are not binding to a spawned browser. + return # Running with --no-browser, we are not binding to a spawned browser. global current_browser_processes logv('First navigation occurred. Identifying currently running browser processes') @@ -421,12 +426,19 @@ def pid_existed(pid): return False for p in running_browser_processes: - logv('Detected running browser process id: ' + str(p['pid']) + ', existed already at emrun startup? ' + str(pid_existed(p['pid']))) + logv( + 'Detected running browser process id: ' + + str(p['pid']) + + ', existed already at emrun startup? ' + + str(pid_existed(p['pid'])) + ) current_browser_processes = [p for p in running_browser_processes if not pid_existed(p['pid'])] if len(current_browser_processes) == 0: - logv('Was unable to detect the browser process that was spawned by emrun. 
This may occur if the target page was opened in a tab on a browser process that already existed before emrun started up.') + logv( + 'Was unable to detect the browser process that was spawned by emrun. This may occur if the target page was opened in a tab on a browser process that already existed before emrun started up.' + ) # Our custom HTTP web server that will server the target page to run via .html. @@ -437,6 +449,7 @@ def pid_existed(pid): class HTTPWebServer(socketserver.ThreadingMixIn, HTTPServer): """Log messaging arriving via HTTP can come in out of sequence. Implement a sequencing mechanism to enforce ordered transmission.""" + expected_http_seq_num = 1 # Stores messages that have arrived out of order, pending for a send as soon # as the missing message arrives. Kept in sorted order, first element is the @@ -526,7 +539,13 @@ def serve_forever(self, timeout=0.5): time_since_message = now - last_message_time if emrun_options.silence_timeout != 0 and time_since_message > emrun_options.silence_timeout: self.shutdown() - logi('No activity in ' + str(emrun_options.silence_timeout) + ' seconds. Quitting web server with return code ' + str(emrun_options.timeout_returncode) + '. (--silence-timeout option)') + logi( + 'No activity in ' + + str(emrun_options.silence_timeout) + + ' seconds. Quitting web server with return code ' + + str(emrun_options.timeout_returncode) + + '. (--silence-timeout option)' + ) page_exit_code = emrun_options.timeout_returncode emrun_options.kill_exit = True @@ -534,7 +553,13 @@ def serve_forever(self, timeout=0.5): time_since_start = now - page_start_time if emrun_options.timeout != 0 and time_since_start > emrun_options.timeout: self.shutdown() - logi('Page has not finished in ' + str(emrun_options.timeout) + ' seconds. Quitting web server with return code ' + str(emrun_options.timeout_returncode) + '. (--timeout option)') + logi( + 'Page has not finished in ' + + str(emrun_options.timeout) + + ' seconds. Quitting web server with return code ' + + str(emrun_options.timeout_returncode) + + '. (--timeout option)' + ) emrun_options.kill_exit = True page_exit_code = emrun_options.timeout_returncode @@ -542,7 +567,9 @@ def serve_forever(self, timeout=0.5): if not emrun_not_enabled_nag_printed and page_last_served_time is not None: time_since_page_serve = now - page_last_served_time if not have_received_messages and time_since_page_serve > 10: - logv('The html page you are running is not emrun-capable. Stdout, stderr and exit(returncode) capture will not work. Recompile the application with the --emrun linker flag to enable this, or pass --no-emrun-detect to emrun to hide this check.') + logv( + 'The html page you are running is not emrun-capable. Stdout, stderr and exit(returncode) capture will not work. Recompile the application with the --emrun linker flag to enable this, or pass --no-emrun-detect to emrun to hide this check.' + ) emrun_not_enabled_nag_printed = True # Clean up at quit, print any leftover messages in queue. @@ -666,7 +693,7 @@ def do_POST(self): # Binary file dump/upload handling. Requests to # "stdio.html?file=filename" will write binary data to the given file. 
data = self.rfile.read(int(self.headers['Content-Length'])) - filename = unquote_u(query[len('file='):]) + filename = unquote_u(query[len('file=') :]) filename = os.path.join(emrun_options.dump_out_directory, os.path.normpath(filename)) try: os.makedirs(os.path.dirname(filename)) @@ -698,12 +725,16 @@ def do_POST(self): data = data.replace("+", " ") data = unquote_u(data) - if data == '^pageload^': # Browser is just notifying that it has successfully launched the page. + if data == '^pageload^': # Browser is just notifying that it has successfully launched the page. have_received_messages = True elif data.startswith('^exit^'): if not emrun_options.serve_after_exit: page_exit_code = int(data[6:]) - logv('Web page has quit with a call to exit() with return code ' + str(page_exit_code) + '. Shutting down web server. Pass --serve-after-exit to keep serving even after the page terminates with exit().') + logv( + 'Web page has quit with a call to exit() with return code ' + + str(page_exit_code) + + '. Shutting down web server. Pass --serve-after-exit to keep serving even after the page terminates with exit().' + ) # Set server socket to nonblocking on shutdown to avoid sporadic deadlocks self.server.socket.setblocking(False) self.server.shutdown() @@ -722,7 +753,7 @@ def do_POST(self): try: i = data.index('^', 5) seq_num = int(data[5:i]) - data = data[i + 1:] + data = data[i + 1 :] except ValueError: pass @@ -741,7 +772,9 @@ def do_POST(self): # Returns stdout by running command with universal_newlines=True def check_output(cmd, universal_newlines=True, *args, **kwargs): if hasattr(subprocess, "run"): - return subprocess.run(cmd, universal_newlines=universal_newlines, stdout=subprocess.PIPE, check=True, *args, **kwargs).stdout + return subprocess.run( + cmd, universal_newlines=universal_newlines, stdout=subprocess.PIPE, check=True, *args, **kwargs + ).stdout else: # check_output is considered as an old API so prefer subprocess.run if possible return subprocess.check_output(cmd, universal_newlines=universal_newlines, *args, **kwargs) @@ -757,6 +790,7 @@ def get_cpu_info(): try: if WINDOWS: from win32com.client import GetObject + root_winmgmts = GetObject('winmgmts:root\\cimv2') cpus = root_winmgmts.ExecQuery('Select * from Win32_Processor') cpu_name = cpus[0].Name + ', ' + platform.processor() @@ -780,18 +814,16 @@ def get_cpu_info(): logical_cores = physical_cores * int(re.search(r'Thread\(s\) per core: (.*)', lscpu).group(1).strip()) except Exception as e: import traceback + loge(traceback.format_exc()) - return {'model': 'Unknown ("' + str(e) + '")', - 'physicalCores': 1, - 'logicalCores': 1, - 'frequency': 0 - } + return {'model': 'Unknown ("' + str(e) + '")', 'physicalCores': 1, 'logicalCores': 1, 'frequency': 0} - return {'model': platform.machine() + ', ' + cpu_name, - 'physicalCores': physical_cores, - 'logicalCores': logical_cores, - 'frequency': frequency - } + return { + 'model': platform.machine() + ', ' + cpu_name, + 'physicalCores': physical_cores, + 'logicalCores': logical_cores, + 'frequency': frequency, + } def get_android_cpu_infoline(): @@ -800,11 +832,13 @@ def get_android_cpu_infoline(): hardware = '' for line in lines: if line.startswith('Processor'): - processor = line[line.find(':') + 1:].strip() + processor = line[line.find(':') + 1 :].strip() elif line.startswith('Hardware'): - hardware = line[line.find(':') + 1:].strip() + hardware = line[line.find(':') + 1 :].strip() - freq = int(check_output([ADB, 'shell', 'cat', 
'/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq']).strip()) // 1000 + freq = ( + int(check_output([ADB, 'shell', 'cat', '/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq']).strip()) // 1000 + ) return 'CPU: ' + processor + ', ' + hardware + ' @ ' + str(freq) + ' MHz' @@ -866,11 +900,11 @@ def linux_get_gpu_info(): glxinfo = check_output('glxinfo') for line in glxinfo.split("\n"): if "OpenGL vendor string:" in line: - gl_vendor = line[len("OpenGL vendor string:"):].strip() + gl_vendor = line[len("OpenGL vendor string:") :].strip() if "OpenGL version string:" in line: - gl_version = line[len("OpenGL version string:"):].strip() + gl_version = line[len("OpenGL version string:") :].strip() if "OpenGL renderer string:" in line: - gl_renderer = line[len("OpenGL renderer string:"):].strip() + gl_renderer = line[len("OpenGL renderer string:") :].strip() glinfo = gl_vendor + ' ' + gl_renderer + ', GL version ' + gl_version except Exception as e: logv(e) @@ -929,13 +963,14 @@ def get_executable_version(filename): try: if WINDOWS: import win32api + info = win32api.GetFileVersionInfo(filename, "\\") ms = info['FileVersionMS'] ls = info['FileVersionLS'] version = win32api.HIWORD(ms), win32api.LOWORD(ms), win32api.HIWORD(ls), win32api.LOWORD(ls) return '.'.join(map(str, version)) elif MACOS: - plistfile = filename[0:filename.find('MacOS')] + 'Info.plist' + plistfile = filename[0 : filename.find('MacOS')] + 'Info.plist' info = plistlib.readPlist(plistfile) # Data in Info.plists is a bit odd, this check combo gives best information on each browser. if 'firefox' in filename.lower(): @@ -959,7 +994,7 @@ def get_executable_version(filename): def get_browser_build_date(filename): try: if MACOS: - plistfile = filename[0:filename.find('MacOS')] + 'Info.plist' + plistfile = filename[0 : filename.find('MacOS')] + 'Info.plist' info = plistlib.readPlist(plistfile) # Data in Info.plists is a bit odd, this check combo gives best information on each browser. 
if 'firefox' in filename.lower(): @@ -979,32 +1014,55 @@ def get_browser_build_date(filename): def get_browser_info(filename, format_json): if format_json: - return json.dumps({ - 'name': browser_display_name(filename), - 'version': get_executable_version(filename), - 'buildDate': get_browser_build_date(filename) - }, indent=2) + return json.dumps( + { + 'name': browser_display_name(filename), + 'version': get_executable_version(filename), + 'buildDate': get_browser_build_date(filename), + }, + indent=2, + ) else: - return 'Browser: ' + browser_display_name(filename) + ' ' + get_executable_version(filename) + ', build ' + get_browser_build_date(filename) + return ( + 'Browser: ' + + browser_display_name(filename) + + ' ' + + get_executable_version(filename) + + ', build ' + + get_browser_build_date(filename) + ) # http://stackoverflow.com/questions/580924/python-windows-file-version-attribute def win_get_file_properties(fname): - propNames = ('Comments', 'InternalName', 'ProductName', - 'CompanyName', 'LegalCopyright', 'ProductVersion', - 'FileDescription', 'LegalTrademarks', 'PrivateBuild', - 'FileVersion', 'OriginalFilename', 'SpecialBuild') + propNames = ( + 'Comments', + 'InternalName', + 'ProductName', + 'CompanyName', + 'LegalCopyright', + 'ProductVersion', + 'FileDescription', + 'LegalTrademarks', + 'PrivateBuild', + 'FileVersion', + 'OriginalFilename', + 'SpecialBuild', + ) props = {'FixedFileInfo': None, 'StringFileInfo': None, 'FileVersion': None} import win32api + # backslash as parm returns dictionary of numeric info corresponding to VS_FIXEDFILEINFO struct fixedInfo = win32api.GetFileVersionInfo(fname, '\\') props['FixedFileInfo'] = fixedInfo - props['FileVersion'] = "%d.%d.%d.%d" % (fixedInfo['FileVersionMS'] / 65536, - fixedInfo['FileVersionMS'] % 65536, - fixedInfo['FileVersionLS'] / 65536, - fixedInfo['FileVersionLS'] % 65536) + props['FileVersion'] = "%d.%d.%d.%d" % ( + fixedInfo['FileVersionMS'] / 65536, + fixedInfo['FileVersionMS'] % 65536, + fixedInfo['FileVersionLS'] / 65536, + fixedInfo['FileVersionLS'] % 65536, + ) # \VarFileInfo\Translation returns list of available (language, codepage) # pairs that can be used to retrieve string info. We are using only the first pair. 
@@ -1015,7 +1073,7 @@ def win_get_file_properties(fname): strInfo = {} for propName in propNames: - strInfoPath = u'\\StringFileInfo\\%04X%04X\\%s' % (lang, codepage, propName) + strInfoPath = '\\StringFileInfo\\%04X%04X\\%s' % (lang, codepage, propName) ## print str_info strInfo[propName] = win32api.GetFileVersionInfo(fname, strInfoPath) @@ -1045,7 +1103,7 @@ def get_computer_model(): model = re.search('(.*)', model) model = model.group(1).strip() with open(os.path.join(os.getenv("HOME"), '.emrun.hwmodel.cached'), 'w') as fh: - fh.write(model) # Cache the hardware model to disk + fh.write(model) # Cache the hardware model to disk return model except Exception: hwmodel = check_output(['sysctl', 'hw.model']) @@ -1067,7 +1125,20 @@ def get_computer_model(): bios_vendor = check_output(['cat', '/sys/devices/virtual/dmi/id/bios_vendor']).strip() bios_version = check_output(['cat', '/sys/devices/virtual/dmi/id/bios_version']).strip() bios_date = check_output(['cat', '/sys/devices/virtual/dmi/id/bios_date']).strip() - return board_vendor + ' ' + board_name + ' ' + board_version + ', ' + bios_vendor + ' ' + bios_version + ' (' + bios_date + ')' + return ( + board_vendor + + ' ' + + board_name + + ' ' + + board_version + + ', ' + + bios_vendor + + ' ' + + bios_version + + ' (' + + bios_date + + ')' + ) except Exception as e: logv(str(e)) return 'Generic' @@ -1090,7 +1161,14 @@ def get_os_version(): return 'macOS ' + platform.mac_ver()[0] + bitness elif LINUX: kernel_version = check_output(['uname', '-r']).strip() - return ' '.join(platform.linux_distribution()) + ', linux kernel ' + kernel_version + ' ' + platform.architecture()[0] + bitness + return ( + ' '.join(platform.linux_distribution()) + + ', linux kernel ' + + kernel_version + + ' ' + + platform.architecture()[0] + + bitness + ) except Exception: return 'Unknown OS' @@ -1110,6 +1188,7 @@ def get_system_memory(): return int(sline[1]) * 1024 elif WINDOWS: import win32api + return win32api.GlobalMemoryStatusEx()['TotalPhys'] elif MACOS: return int(check_output(['sysctl', '-n', 'hw.memsize']).strip()) @@ -1192,14 +1271,16 @@ def find_browser(name): if MACOS: # Note: by default Firefox beta installs as 'Firefox.app', you must manually rename it to # FirefoxBeta.app after installation. 
- browser_locations = [('firefox', '/Applications/Firefox.app/Contents/MacOS/firefox'), - ('firefox_beta', '/Applications/FirefoxBeta.app/Contents/MacOS/firefox'), - ('firefox_aurora', '/Applications/FirefoxAurora.app/Contents/MacOS/firefox'), - ('firefox_nightly', '/Applications/FirefoxNightly.app/Contents/MacOS/firefox'), - ('safari', '/Applications/Safari.app/Contents/MacOS/Safari'), - ('opera', '/Applications/Opera.app/Contents/MacOS/Opera'), - ('chrome', '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'), - ('chrome_canary', '/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary')] + browser_locations = [ + ('firefox', '/Applications/Firefox.app/Contents/MacOS/firefox'), + ('firefox_beta', '/Applications/FirefoxBeta.app/Contents/MacOS/firefox'), + ('firefox_aurora', '/Applications/FirefoxAurora.app/Contents/MacOS/firefox'), + ('firefox_nightly', '/Applications/FirefoxNightly.app/Contents/MacOS/firefox'), + ('safari', '/Applications/Safari.app/Contents/MacOS/Safari'), + ('opera', '/Applications/Opera.app/Contents/MacOS/Opera'), + ('chrome', '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'), + ('chrome_canary', '/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary'), + ] elif WINDOWS: pf_locations = ['ProgramFiles(x86)', 'ProgramFiles', 'ProgramW6432', 'LOCALAPPDATA'] @@ -1208,31 +1289,35 @@ def find_browser(name): continue program_files = os.environ[pf_env] if WINDOWS else '' - browser_locations += [('chrome', os.path.join(program_files, 'Google/Chrome/Application/chrome.exe')), - ('chrome_canary', os.path.expanduser("~/AppData/Local/Google/Chrome SxS/Application/chrome.exe")), - ('firefox_nightly', os.path.join(program_files, 'Nightly/firefox.exe')), - ('firefox_aurora', os.path.join(program_files, 'Aurora/firefox.exe')), - ('firefox_beta', os.path.join(program_files, 'Beta/firefox.exe')), - ('firefox_beta', os.path.join(program_files, 'FirefoxBeta/firefox.exe')), - ('firefox_beta', os.path.join(program_files, 'Firefox Beta/firefox.exe')), - ('firefox', os.path.join(program_files, 'Mozilla Firefox/firefox.exe')), - ('iexplore', os.path.join(program_files, 'Internet Explorer/iexplore.exe')), - ('opera', os.path.join(program_files, 'Opera/launcher.exe'))] + browser_locations += [ + ('chrome', os.path.join(program_files, 'Google/Chrome/Application/chrome.exe')), + ('chrome_canary', os.path.expanduser("~/AppData/Local/Google/Chrome SxS/Application/chrome.exe")), + ('firefox_nightly', os.path.join(program_files, 'Nightly/firefox.exe')), + ('firefox_aurora', os.path.join(program_files, 'Aurora/firefox.exe')), + ('firefox_beta', os.path.join(program_files, 'Beta/firefox.exe')), + ('firefox_beta', os.path.join(program_files, 'FirefoxBeta/firefox.exe')), + ('firefox_beta', os.path.join(program_files, 'Firefox Beta/firefox.exe')), + ('firefox', os.path.join(program_files, 'Mozilla Firefox/firefox.exe')), + ('iexplore', os.path.join(program_files, 'Internet Explorer/iexplore.exe')), + ('opera', os.path.join(program_files, 'Opera/launcher.exe')), + ] elif LINUX: - browser_locations = [('firefox', os.path.expanduser('~/firefox/firefox')), - ('firefox_beta', os.path.expanduser('~/firefox_beta/firefox')), - ('firefox_aurora', os.path.expanduser('~/firefox_aurora/firefox')), - ('firefox_nightly', os.path.expanduser('~/firefox_nightly/firefox')), - ('chrome', which('google-chrome-stable')), - ('chrome', which('google-chrome'))] + browser_locations = [ + ('firefox', os.path.expanduser('~/firefox/firefox')), + ('firefox_beta', 
os.path.expanduser('~/firefox_beta/firefox')), + ('firefox_aurora', os.path.expanduser('~/firefox_aurora/firefox')), + ('firefox_nightly', os.path.expanduser('~/firefox_nightly/firefox')), + ('chrome', which('google-chrome-stable')), + ('chrome', which('google-chrome')), + ] for alias, browser_exe in browser_locations: if name == alias: if browser_exe is not None and os.path.isfile(browser_exe): return [browser_exe] - return None # Could not find the browser + return None # Could not find the browser def get_android_model(): @@ -1295,7 +1380,17 @@ def list_android_browsers(): def list_pc_browsers(): - browsers = ['firefox', 'firefox_beta', 'firefox_aurora', 'firefox_nightly', 'chrome', 'chrome_canary', 'iexplore', 'safari', 'opera'] + browsers = [ + 'firefox', + 'firefox_beta', + 'firefox_aurora', + 'firefox_nightly', + 'chrome', + 'chrome_canary', + 'iexplore', + 'safari', + 'opera', + ] logi('emrun has automatically found the following browsers in the default install locations on the system:') logi('') for browser in browsers: @@ -1306,7 +1401,9 @@ def list_pc_browsers(): logi(' - ' + browser + ': ' + browser_display_name(browser_exe) + ' ' + get_executable_version(browser_exe)) logi('') logi('You can pass the --browser option to launch with the given browser above.') - logi('Even if your browser was not detected, you can use --browser /path/to/browser/executable to launch with that browser.') + logi( + 'Even if your browser was not detected, you can use --browser /path/to/browser/executable to launch with that browser.' + ) def browser_display_name(browser): @@ -1335,8 +1432,8 @@ def subprocess_env(): e = os.environ.copy() # https://bugzilla.mozilla.org/show_bug.cgi?id=745154 e['MOZ_DISABLE_AUTO_SAFE_MODE'] = '1' - e['MOZ_DISABLE_SAFE_MODE_KEY'] = '1' # https://bugzilla.mozilla.org/show_bug.cgi?id=653410#c9 - e['JIT_OPTION_asmJSAtomicsEnable'] = 'true' # https://bugzilla.mozilla.org/show_bug.cgi?id=1299359#c0 + e['MOZ_DISABLE_SAFE_MODE_KEY'] = '1' # https://bugzilla.mozilla.org/show_bug.cgi?id=653410#c9 + e['JIT_OPTION_asmJSAtomicsEnable'] = 'true' # https://bugzilla.mozilla.org/show_bug.cgi?id=1299359#c0 return e @@ -1344,12 +1441,14 @@ def subprocess_env(): def remove_tree(d): os.chmod(d, stat.S_IWRITE) try: + def remove_readonly_and_try_again(func, path, _exc_info): if not (os.stat(path).st_mode & stat.S_IWRITE): os.chmod(path, stat.S_IWRITE) func(path) else: raise + shutil.rmtree(d, onerror=remove_readonly_and_try_again) except Exception: pass @@ -1358,14 +1457,20 @@ def remove_readonly_and_try_again(func, path, _exc_info): def get_system_info(format_json): if emrun_options.android: if format_json: - return json.dumps({'model': get_android_model(), - 'os': get_android_os_version(), - 'ram': get_system_memory(), - 'cpu': get_android_cpu_infoline() - }, indent=2) + return json.dumps( + { + 'model': get_android_model(), + 'os': get_android_os_version(), + 'ram': get_system_memory(), + 'cpu': get_android_cpu_infoline(), + }, + indent=2, + ) else: info = 'Model: ' + get_android_model() + '\n' - info += 'OS: ' + get_android_os_version() + ' with ' + str(get_system_memory() // 1024 // 1024) + ' MB of System RAM\n' + info += ( + 'OS: ' + get_android_os_version() + ' with ' + str(get_system_memory() // 1024 // 1024) + ' MB of System RAM\n' + ) info += 'CPU: ' + get_android_cpu_infoline() + '\n' return info.strip() else: @@ -1374,6 +1479,7 @@ def get_system_info(format_json): unique_system_id = fh.read().strip() except Exception: import uuid + unique_system_id = str(uuid.uuid4()) try: 
open(os.path.expanduser('~/.emrun.generated.guid'), 'w').write(unique_system_id) @@ -1381,25 +1487,44 @@ def get_system_info(format_json): logv(e) if format_json: - return json.dumps({'name': socket.gethostname(), - 'model': get_computer_model(), - 'os': get_os_version(), - 'ram': get_system_memory(), - 'cpu': get_cpu_info(), - 'gpu': get_gpu_info(), - 'uuid': unique_system_id}, indent=2) + return json.dumps( + { + 'name': socket.gethostname(), + 'model': get_computer_model(), + 'os': get_os_version(), + 'ram': get_system_memory(), + 'cpu': get_cpu_info(), + 'gpu': get_gpu_info(), + 'uuid': unique_system_id, + }, + indent=2, + ) else: cpu = get_cpu_info() gpus = get_gpu_info() - info = 'Computer name: ' + socket.gethostname() + '\n' # http://stackoverflow.com/questions/799767/getting-name-of-windows-computer-running-python-script + info = ( + 'Computer name: ' + socket.gethostname() + '\n' + ) # http://stackoverflow.com/questions/799767/getting-name-of-windows-computer-running-python-script info += 'Model: ' + get_computer_model() + '\n' info += 'OS: ' + get_os_version() + ' with ' + str(get_system_memory() // 1024 // 1024) + ' MB of System RAM\n' - info += 'CPU: ' + cpu['model'] + ', ' + str(cpu['frequency']) + ' MHz, ' + str(cpu['physicalCores']) + ' physical cores, ' + str(cpu['logicalCores']) + ' logical cores\n' + info += ( + 'CPU: ' + + cpu['model'] + + ', ' + + str(cpu['frequency']) + + ' MHz, ' + + str(cpu['physicalCores']) + + ' physical cores, ' + + str(cpu['logicalCores']) + + ' logical cores\n' + ) if len(gpus) == 1: info += 'GPU: ' + gpus[0]['model'] + ' with ' + str(gpus[0]['ram'] // 1024 // 1024) + " MB of VRAM\n" elif len(gpus) > 1: for i in range(0, len(gpus)): - info += 'GPU' + str(i) + ": " + gpus[i]['model'] + ' with ' + str(gpus[i]['ram'] // 1024 // 1024) + ' MBs of VRAM\n' + info += ( + 'GPU' + str(i) + ": " + gpus[i]['model'] + ' with ' + str(gpus[i]['ram'] // 1024 // 1024) + ' MBs of VRAM\n' + ) info += 'UUID: ' + unique_system_id return info.strip() @@ -1416,6 +1541,7 @@ def list_processes_by_name(exe_full_path): pids = [] try: import psutil + for proc in psutil.process_iter(): try: pinfo = proc.as_dict(attrs=['pid', 'name', 'exe']) @@ -1449,121 +1575,170 @@ def list_processes_by_name(exe_full_path): def parse_args(args): parser = argparse.ArgumentParser(usage=usage_str) - parser.add_argument('--kill-start', action='store_true', - help='If true, any previously running instances of ' - 'the target browser are killed before starting.') - - parser.add_argument('--kill-exit', action='store_true', - help='If true, the spawned browser process is forcibly ' - 'killed when it calls exit(). 
Note: Using this ' - 'option may require explicitly passing the option ' - '--browser=/path/to/browser, to avoid emrun being ' - 'detached from the browser process it spawns.') - - parser.add_argument('--no-server', dest='run_server', action='store_false', - default=True, - help='If specified, a HTTP web server is not launched ' - 'to host the page to run.') - - parser.add_argument('--no-browser', dest='run_browser', action='store_false', - default=True, - help='If specified, emrun will not launch a web browser ' - 'to run the page.') - - parser.add_argument('--no-emrun-detect', action='store_true', - help='If specified, skips printing the warning message ' - 'if html page is detected to not have been built ' - 'with --emrun linker flag.') - - parser.add_argument('--serve-after-close', action='store_true', - help='If true, serves the web page even after the ' - 'application quits by user closing the web page.') - - parser.add_argument('--serve-after-exit', action='store_true', - help='If true, serves the web page even after the ' - 'application quits by a call to exit().') - - parser.add_argument('--serve-root', - help='If set, specifies the root path that the emrun ' - 'web server serves. If not specified, the directory ' - 'where the target .html page lives in is served.') - - parser.add_argument('--verbose', action='store_true', - help='Enable verbose logging from emrun internal operation.') - - parser.add_argument('--hostname', default=default_webserver_hostname, - help='Specifies the hostname the server runs in.') - - parser.add_argument('--port', default=default_webserver_port, type=int, - help='Specifies the port the server runs in.') - - parser.add_argument('--log-stdout', - help='Specifies a log filename where the browser process ' - 'stdout data will be appended to.') - - parser.add_argument('--log-stderr', - help='Specifies a log filename where the browser process stderr data will be appended to.') - - parser.add_argument('--silence-timeout', type=int, default=0, - help='If no activity is received in this many seconds, ' - 'the browser process is assumed to be hung, and the web ' - 'server is shut down and the target browser killed. ' - 'Disabled by default.') - - parser.add_argument('--timeout', type=int, default=0, - help='If the browser process does not quit or the page ' - 'exit() in this many seconds, the browser is assumed ' - 'to be hung, and the web server is shut down and the ' - 'target browser killed. Disabled by default.') - - parser.add_argument('--timeout-returncode', type=int, default=99999, - help='Sets the exit code that emrun reports back to ' - 'caller in the case that a page timeout occurs. ' - 'Default: 99999.') - - parser.add_argument('--list-browsers', action='store_true', - help='Prints out all detected browser that emrun is able ' - 'to use with the --browser command and exits.') - - parser.add_argument('--browser', - help='Specifies the browser executable to run the web page in.') - - parser.add_argument('--browser-args', default='', - help='Specifies the arguments to the browser executable.') - - parser.add_argument('--android', action='store_true', - help='Launches the page in a browser of an Android ' - 'device connected to an USB on the local system. (via adb)') - - parser.add_argument('--android-tunnel', action='store_true', - help='Expose the port directly to the Android device ' - 'and connect to it as localhost, establishing ' - 'cross origin isolation. Implies --android. 
A ' - 'reverse socket connection is created by adb ' - 'reverse, and remains after emrun terminates (it ' - 'can be removed by adb reverse --remove).') - - parser.add_argument('--system-info', action='store_true', - help='Prints information about the current system at startup.') - - parser.add_argument('--browser-info', action='store_true', - help='Prints information about the target browser to launch at startup.') - - parser.add_argument('--json', action='store_true', - help='If specified, --system-info and --browser-info are ' - 'outputted in JSON format.') - - parser.add_argument('--safe-firefox-profile', action='store_true', - help='If true, the browser is launched into a new clean ' - 'Firefox profile that is suitable for unattended ' - 'automated runs. (If target browser != Firefox, ' - 'this parameter is ignored)') - - parser.add_argument('--private-browsing', action='store_true', - help='If specified, opens browser in private/incognito mode.') - - parser.add_argument('--dump-out-directory', default='dump_out', type=str, - help='If specified, overrides the directory for dump files using emrun_file_dump method.') + parser.add_argument( + '--kill-start', + action='store_true', + help='If true, any previously running instances of ' 'the target browser are killed before starting.', + ) + + parser.add_argument( + '--kill-exit', + action='store_true', + help='If true, the spawned browser process is forcibly ' + 'killed when it calls exit(). Note: Using this ' + 'option may require explicitly passing the option ' + '--browser=/path/to/browser, to avoid emrun being ' + 'detached from the browser process it spawns.', + ) + + parser.add_argument( + '--no-server', + dest='run_server', + action='store_false', + default=True, + help='If specified, a HTTP web server is not launched ' 'to host the page to run.', + ) + + parser.add_argument( + '--no-browser', + dest='run_browser', + action='store_false', + default=True, + help='If specified, emrun will not launch a web browser ' 'to run the page.', + ) + + parser.add_argument( + '--no-emrun-detect', + action='store_true', + help='If specified, skips printing the warning message ' + 'if html page is detected to not have been built ' + 'with --emrun linker flag.', + ) + + parser.add_argument( + '--serve-after-close', + action='store_true', + help='If true, serves the web page even after the ' 'application quits by user closing the web page.', + ) + + parser.add_argument( + '--serve-after-exit', + action='store_true', + help='If true, serves the web page even after the ' 'application quits by a call to exit().', + ) + + parser.add_argument( + '--serve-root', + help='If set, specifies the root path that the emrun ' + 'web server serves. If not specified, the directory ' + 'where the target .html page lives in is served.', + ) + + parser.add_argument('--verbose', action='store_true', help='Enable verbose logging from emrun internal operation.') + + parser.add_argument( + '--hostname', default=default_webserver_hostname, help='Specifies the hostname the server runs in.' + ) + + parser.add_argument('--port', default=default_webserver_port, type=int, help='Specifies the port the server runs in.') + + parser.add_argument( + '--log-stdout', help='Specifies a log filename where the browser process ' 'stdout data will be appended to.' + ) + + parser.add_argument( + '--log-stderr', help='Specifies a log filename where the browser process stderr data will be appended to.' 
+ ) + + parser.add_argument( + '--silence-timeout', + type=int, + default=0, + help='If no activity is received in this many seconds, ' + 'the browser process is assumed to be hung, and the web ' + 'server is shut down and the target browser killed. ' + 'Disabled by default.', + ) + + parser.add_argument( + '--timeout', + type=int, + default=0, + help='If the browser process does not quit or the page ' + 'exit() in this many seconds, the browser is assumed ' + 'to be hung, and the web server is shut down and the ' + 'target browser killed. Disabled by default.', + ) + + parser.add_argument( + '--timeout-returncode', + type=int, + default=99999, + help='Sets the exit code that emrun reports back to ' + 'caller in the case that a page timeout occurs. ' + 'Default: 99999.', + ) + + parser.add_argument( + '--list-browsers', + action='store_true', + help='Prints out all detected browser that emrun is able ' 'to use with the --browser command and exits.', + ) + + parser.add_argument('--browser', help='Specifies the browser executable to run the web page in.') + + parser.add_argument('--browser-args', default='', help='Specifies the arguments to the browser executable.') + + parser.add_argument( + '--android', + action='store_true', + help='Launches the page in a browser of an Android ' 'device connected to an USB on the local system. (via adb)', + ) + + parser.add_argument( + '--android-tunnel', + action='store_true', + help='Expose the port directly to the Android device ' + 'and connect to it as localhost, establishing ' + 'cross origin isolation. Implies --android. A ' + 'reverse socket connection is created by adb ' + 'reverse, and remains after emrun terminates (it ' + 'can be removed by adb reverse --remove).', + ) + + parser.add_argument( + '--system-info', action='store_true', help='Prints information about the current system at startup.' + ) + + parser.add_argument( + '--browser-info', action='store_true', help='Prints information about the target browser to launch at startup.' + ) + + parser.add_argument( + '--json', + action='store_true', + help='If specified, --system-info and --browser-info are ' 'outputted in JSON format.', + ) + + parser.add_argument( + '--safe-firefox-profile', + action='store_true', + help='If true, the browser is launched into a new clean ' + 'Firefox profile that is suitable for unattended ' + 'automated runs. (If target browser != Firefox, ' + 'this parameter is ignored)', + ) + + parser.add_argument( + '--private-browsing', action='store_true', help='If specified, opens browser in private/incognito mode.' + ) + + parser.add_argument( + '--dump-out-directory', + default='dump_out', + type=str, + help='If specified, overrides the directory for dump files using emrun_file_dump method.', + ) parser.add_argument('serve', nargs='?', default='') @@ -1637,7 +1812,9 @@ def run(args): # noqa: C901, PLR0912, PLR0915 file_to_serve = options.serve else: file_to_serve = '.' 
- file_to_serve_is_url = file_to_serve.startswith('file://') or file_to_serve.startswith('http://') or file_to_serve.startswith('https://') + file_to_serve_is_url = ( + file_to_serve.startswith('file://') or file_to_serve.startswith('http://') or file_to_serve.startswith('https://') + ) if options.serve_root: serve_dir = os.path.abspath(options.serve_root) @@ -1680,7 +1857,9 @@ def run(args): # noqa: C901, PLR0912, PLR0915 if options.android: if options.run_browser or options.browser_info: if not options.browser: - loge("Running on Android requires that you explicitly specify the browser to run with --browser . Run emrun --android --list-browsers to obtain a list of installed browsers you can use.") + loge( + "Running on Android requires that you explicitly specify the browser to run with --browser . Run emrun --android --list-browsers to obtain a list of installed browsers you can use." + ) return 1 elif options.browser == 'firefox': browser_app = 'org.mozilla.firefox/org.mozilla.gecko.BrowserApp' @@ -1713,15 +1892,19 @@ def run(args): # noqa: C901, PLR0912, PLR0915 url = url.replace('&', '\\&') browser = [ADB, 'shell', 'am', 'start', '-a', 'android.intent.action.VIEW', '-n', browser_app, '-d', url] - processname_killed_atexit = browser_app[:browser_app.find('/')] - else: # Launching a web page on local system. + processname_killed_atexit = browser_app[: browser_app.find('/')] + else: # Launching a web page on local system. if options.browser: options.browser = unwrap(options.browser) if options.run_browser or options.browser_info: browser = find_browser(str(options.browser)) if not browser: - loge('Unable to find browser "' + str(options.browser) + '"! Check the correctness of the passed --browser=xxx parameter!') + loge( + 'Unable to find browser "' + + str(options.browser) + + '"! Check the correctness of the passed --browser=xxx parameter!' + ) return 1 browser_exe = browser[0] browser_args = shlex.split(unwrap(options.browser_args)) @@ -1737,12 +1920,20 @@ def run(args): # noqa: C901, PLR0912, PLR0915 processname_killed_atexit = 'Safari' elif 'chrome' in browser_exe.lower(): processname_killed_atexit = 'chrome' - browser_args += ['--enable-nacl', '--enable-pnacl', '--disable-restore-session-state', '--enable-webgl', - '--no-default-browser-check', '--no-first-run', '--allow-file-access-from-files', '--password-store=basic'] + browser_args += [ + '--enable-nacl', + '--enable-pnacl', + '--disable-restore-session-state', + '--enable-webgl', + '--no-default-browser-check', + '--no-first-run', + '--allow-file-access-from-files', + '--password-store=basic', + ] if options.private_browsing: browser_args += ['--incognito'] - # if not options.run_server: - # browser_args += ['--disable-web-security'] + # if not options.run_server: + # browser_args += ['--disable-web-security'] elif 'firefox' in browser_exe.lower(): processname_killed_atexit = 'firefox' elif 'iexplore' in browser_exe.lower(): @@ -1777,7 +1968,9 @@ def run(cmd): run(['adb', 'shell', 'mkdir', '/mnt/sdcard/safe_firefox_profile']) run(['adb', 'push', os.path.join(profile_dir, 'prefs.js'), '/mnt/sdcard/safe_firefox_profile/prefs.js']) except Exception as e: - loge('Creating Firefox profile prefs.js file to internal storage in /mnt/sdcard failed with error ' + str(e) + '!') + loge( + 'Creating Firefox profile prefs.js file to internal storage in /mnt/sdcard failed with error ' + str(e) + '!' 
+ ) loge('Try running without --safe-firefox-profile flag if unattended execution mode is not important, or') loge('enable rooted debugging on the Android device to allow adb to write files to /mnt/sdcard.') browser += ['--es', 'args', '"--profile /mnt/sdcard/safe_firefox_profile"'] @@ -1785,7 +1978,12 @@ def run(cmd): # Create temporary Firefox profile to run the page with. This is important to # run after kill_browser_process()/kill_start op above, since that cleans up # the temporary profile if one exists. - if processname_killed_atexit == 'firefox' and options.safe_firefox_profile and options.run_browser and not options.android: + if ( + processname_killed_atexit == 'firefox' + and options.safe_firefox_profile + and options.run_browser + and not options.android + ): profile_dir = create_emrun_safe_firefox_profile() browser += ['-no-remote', '--profile', profile_dir.replace('\\', '/')] @@ -1827,7 +2025,12 @@ def run(cmd): logv(browser_exe) previous_browser_processes = list_processes_by_name(browser_exe) for p in previous_browser_processes: - logv('Before spawning web browser, found a running ' + os.path.basename(browser_exe) + ' browser process id: ' + str(p['pid'])) + logv( + 'Before spawning web browser, found a running ' + + os.path.basename(browser_exe) + + ' browser process id: ' + + str(p['pid']) + ) browser_process = subprocess.Popen(browser, env=subprocess_env()) logv('Launched browser process with pid=' + str(browser_process.pid)) if options.kill_exit: @@ -1841,9 +2044,15 @@ def run(cmd): premature_quit_code = browser_process.poll() if premature_quit_code is not None: options.serve_after_close = True - logv('Warning: emrun got immediately detached from the target browser process (the process quit with exit code ' + str(premature_quit_code) + '). Cannot detect when user closes the browser. Behaving as if --serve-after-close was passed in.') + logv( + 'Warning: emrun got immediately detached from the target browser process (the process quit with exit code ' + + str(premature_quit_code) + + '). Cannot detect when user closes the browser. Behaving as if --serve-after-close was passed in.' + ) if not options.browser: - logv('Try passing the --browser=/path/to/browser option to avoid this from occurring. See https://github.com/emscripten-core/emscripten/issues/3234 for more discussion.') + logv( + 'Try passing the --browser=/path/to/browser option to avoid this from occurring. See https://github.com/emscripten-core/emscripten/issues/3234 for more discussion.' + ) if options.run_server: try: @@ -1880,7 +2089,11 @@ def main(args): returncode = run(args) logv('emrun quitting with process exit code ' + str(returncode)) if temp_firefox_profile_dir is not None: - logi('Warning: Had to leave behind a temporary Firefox profile directory ' + temp_firefox_profile_dir + ' because --safe-firefox-profile was set and the browser did not quit before emrun did.') + logi( + 'Warning: Had to leave behind a temporary Firefox profile directory ' + + temp_firefox_profile_dir + + ' because --safe-firefox-profile was set and the browser did not quit before emrun did.' 
+ ) return returncode diff --git a/emsize.py b/emsize.py index 8bf92a3843c01..6ea849351f20a 100755 --- a/emsize.py +++ b/emsize.py @@ -61,8 +61,7 @@ def print_sizes(js_file): if not os.path.isfile(wasm_file): return error('Wasm file %s not found' % wasm_file) - sizes = shared.check_call([LLVM_SIZE, '--format=sysv', wasm_file], - stdout=subprocess.PIPE).stdout + sizes = shared.check_call([LLVM_SIZE, '--format=sysv', wasm_file], stdout=subprocess.PIPE).stdout # llvm-size may emit some number of blank lines (after the total), ignore them lines = [line for line in sizes.splitlines() if line] diff --git a/emstrip.py b/emstrip.py index f054fe7e9f1df..b330234f960cb 100755 --- a/emstrip.py +++ b/emstrip.py @@ -4,8 +4,7 @@ # University of Illinois/NCSA Open Source License. Both these licenses can be # found in the LICENSE file. -"""Wrapper script around `llvm-strip`. -""" +"""Wrapper script around `llvm-strip`.""" import sys from tools import shared diff --git a/emsymbolizer.py b/emsymbolizer.py index 7ba3b951c852b..37728882b53fb 100755 --- a/emsymbolizer.py +++ b/emsymbolizer.py @@ -22,8 +22,7 @@ from tools import shared from tools import webassembly -LLVM_SYMBOLIZER = os.path.expanduser( - shared.build_llvm_tool_path(shared.exe_suffix('llvm-symbolizer'))) +LLVM_SYMBOLIZER = os.path.expanduser(shared.build_llvm_tool_path(shared.exe_suffix('llvm-symbolizer'))) class Error(BaseException): @@ -68,8 +67,7 @@ def symbolize_address_symbolizer(module, address, is_dwarf): vma_adjust = get_codesec_offset(module) else: vma_adjust = 0 - cmd = [LLVM_SYMBOLIZER, '-e', module.filename, f'--adjust-vma={vma_adjust}', - str(address)] + cmd = [LLVM_SYMBOLIZER, '-e', module.filename, f'--adjust-vma={vma_adjust}', str(address)] out = shared.run_process(cmd, stdout=subprocess.PIPE).stdout.strip() out_lines = out.splitlines() @@ -184,11 +182,7 @@ def lookup(self, offset): nearest = self.find_offset(offset) assert nearest in self.mappings, 'Sourcemap has an offset with no mapping' info = self.mappings[nearest] - return LocationInfo( - self.sources[info.source] if info.source is not None else None, - info.line, - info.column - ) + return LocationInfo(self.sources[info.source] if info.source is not None else None, info.line, info.column) def symbolize_address_sourcemap(module, address, force_file): @@ -223,36 +217,32 @@ def main(args): if args.addrtype == 'code': address += get_codesec_offset(module) - if ((has_debug_line_section(module) and not args.source) or - 'dwarf' in args.source): + if (has_debug_line_section(module) and not args.source) or 'dwarf' in args.source: symbolize_address_symbolizer(module, address, is_dwarf=True) - elif ((get_sourceMappingURL_section(module) and not args.source) or - 'sourcemap' in args.source): + elif (get_sourceMappingURL_section(module) and not args.source) or 'sourcemap' in args.source: symbolize_address_sourcemap(module, address, args.file) - elif ((has_name_section(module) and not args.source) or - 'names' in args.source): + elif (has_name_section(module) and not args.source) or 'names' in args.source: symbolize_address_symbolizer(module, address, is_dwarf=False) - elif ((has_linking_section(module) and not args.source) or - 'symtab' in args.source): + elif (has_linking_section(module) and not args.source) or 'symtab' in args.source: symbolize_address_symbolizer(module, address, is_dwarf=False) else: - raise Error('No .debug_line or sourceMappingURL section found in ' - f'{module.filename}.' 
- " I don't know how to symbolize this file yet") + raise Error( + 'No .debug_line or sourceMappingURL section found in ' + f'{module.filename}.' + " I don't know how to symbolize this file yet" + ) def get_args(): parser = argparse.ArgumentParser() - parser.add_argument('-s', '--source', choices=['dwarf', 'sourcemap', - 'names', 'symtab'], - help='Force debug info source type', default=()) - parser.add_argument('-f', '--file', action='store', - help='Force debug info source file') - parser.add_argument('-t', '--addrtype', choices=['code', 'file'], - default='file', - help='Address type (code section or file offset)') - parser.add_argument('-v', '--verbose', action='store_true', - help='Print verbose info for debugging this script') + parser.add_argument( + '-s', '--source', choices=['dwarf', 'sourcemap', 'names', 'symtab'], help='Force debug info source type', default=() + ) + parser.add_argument('-f', '--file', action='store', help='Force debug info source file') + parser.add_argument( + '-t', '--addrtype', choices=['code', 'file'], default='file', help='Address type (code section or file offset)' + ) + parser.add_argument('-v', '--verbose', action='store_true', help='Print verbose info for debugging this script') parser.add_argument('wasm_file', help='Wasm file') parser.add_argument('address', help='Address to lookup') args = parser.parse_args() diff --git a/pyproject.toml b/pyproject.toml index f4c62aacb8f16..dd3ab05e3c472 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,6 +12,10 @@ exclude = [ ".git", ] +format.quote-style = "preserve" +indent-width = 2 +line-length = 120 + lint.select = [ "ARG", "ASYNC", diff --git a/requirements-dev.txt b/requirements-dev.txt index 6fea74921d9df..6199b7870088e 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -6,7 +6,7 @@ coverage[toml]==5.5 mypy==0.971 -ruff==0.8.2 +ruff==0.8.3 types-requests==2.27.14 unittest-xml-reporting==3.1.0 diff --git a/site/source/get_api_items.py b/site/source/get_api_items.py index ee94f6909e6d3..404bb42eee026 100755 --- a/site/source/get_api_items.py +++ b/site/source/get_api_items.py @@ -26,77 +26,83 @@ def parseFiles(): - """Parse api-reference files to extract the code items. 
- """ - - def addapiitems(matchobj): - # print 'matcobj0: %s' % matchobj.group(0) - # print 'matcobj1: %s' % matchobj.group(1) - # print 'matcobj2: %s' % matchobj.group(2) - # print 'matcobj3: %s' % matchobj.group(3) - # print 'matcobj4: %s' % matchobj.group(4) - - lang = matchobj.group(2) - data_type = matchobj.group(3) - if data_type == 'function': - data_type = 'func' - api_item = matchobj.group(4) - api_item = api_item.strip() - api_item = api_item.split('(')[0] - try: - api_item = api_item.split(' ')[1] - except IndexError: - pass - - # print lang - # print data_type - # print api_item - - api_reference_items[api_item] = ':%s:%s:`%s`' % (lang, data_type, api_item) - # Add additional index for functions declared as func() rather than just func - if data_type == 'func': - api_item_index = api_item + '()' - api_reference_items[api_item_index] = ':%s:%s:`%s`' % (lang, data_type, api_item) - - # print api_reference_items[api_item] - - for file in os.listdir(api_reference_directory): - if file.endswith(".rst"): - filepath = api_reference_directory + file - print(file) - # open file - with open(filepath, 'r') as infile: - for line in infile: - # parse line for API items - re.sub(r'^\.\.\s+((\w+)\:(\w+)\:\:(.*))', addapiitems, line) + """Parse api-reference files to extract the code items.""" + + def addapiitems(matchobj): + # print 'matcobj0: %s' % matchobj.group(0) + # print 'matcobj1: %s' % matchobj.group(1) + # print 'matcobj2: %s' % matchobj.group(2) + # print 'matcobj3: %s' % matchobj.group(3) + # print 'matcobj4: %s' % matchobj.group(4) + + lang = matchobj.group(2) + data_type = matchobj.group(3) + if data_type == 'function': + data_type = 'func' + api_item = matchobj.group(4) + api_item = api_item.strip() + api_item = api_item.split('(')[0] + try: + api_item = api_item.split(' ')[1] + except IndexError: + pass + + # print lang + # print data_type + # print api_item + + api_reference_items[api_item] = ':%s:%s:`%s`' % (lang, data_type, api_item) + # Add additional index for functions declared as func() rather than just func + if data_type == 'func': + api_item_index = api_item + '()' + api_reference_items[api_item_index] = ':%s:%s:`%s`' % (lang, data_type, api_item) + + # print api_reference_items[api_item] + + for file in os.listdir(api_reference_directory): + if file.endswith(".rst"): + filepath = api_reference_directory + file + print(file) + # open file + with open(filepath, 'r') as infile: + for line in infile: + # parse line for API items + re.sub(r'^\.\.\s+((\w+)\:(\w+)\:\:(.*))', addapiitems, line) def exportItems(): - """Export the API items into form for use in another script. 
- """ - with open(api_item_filename, 'w') as infile: - # write function lead in - infile.write("# Auto-generated file (see get_api_items.py)\n\ndef get_mapped_items():\n mapped_wiki_inline_code = dict()\n") + """Export the API items into form for use in another script.""" + with open(api_item_filename, 'w') as infile: + # write function lead in + infile.write( + "# Auto-generated file (see get_api_items.py)\n\ndef get_mapped_items():\n mapped_wiki_inline_code = dict()\n" + ) - items = list((key, value) for key, value in api_reference_items.items()) - items.sort() - for key, value in items: - # Write out each API item to add - infile.write(" mapped_wiki_inline_code['%s'] = '%s'\n" % (key, value)) + items = list((key, value) for key, value in api_reference_items.items()) + items.sort() + for key, value in items: + # Write out each API item to add + infile.write(" mapped_wiki_inline_code['%s'] = '%s'\n" % (key, value)) - # write the return function - infile.write(" return mapped_wiki_inline_code\n") + # write the return function + infile.write(" return mapped_wiki_inline_code\n") def main(): - parser = optparse.OptionParser(usage="Usage: %prog [options] version") - parser.add_option("-s", "--siteapi", dest="siteapi", default="http://www.developer.nokia.com/Community/Wiki/api.php", help="Location of API") - (options, args) = parser.parse_args() - # print 'Site: %s' % options.siteapi - parseFiles() - exportItems() - return 0 + parser = optparse.OptionParser(usage="Usage: %prog [options] version") + parser.add_option( + "-s", + "--siteapi", + dest="siteapi", + default="http://www.developer.nokia.com/Community/Wiki/api.php", + help="Location of API", + ) + (options, args) = parser.parse_args() + # print 'Site: %s' % options.siteapi + parseFiles() + exportItems() + return 0 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/site/source/get_wiki.py b/site/source/get_wiki.py index 899531541f222..dbe2a3c5e5b78 100755 --- a/site/source/get_wiki.py +++ b/site/source/get_wiki.py @@ -39,185 +39,197 @@ temp_set_of_codemarkup = set() logfile = open(logfilename, 'w') # snapshot_version_information = '.. note:: This is a **snapshot** of the wiki: %s\n\n' % strftime("%a, %d %b %Y %H:%M", gmtime()) -snapshot_version_information = '.. note:: This article was migrated from the wiki (%s) and is now the "master copy" (the version in the wiki will be deleted). It may not be a perfect rendering of the original but we hope to fix that soon!\n\n' % time.strftime("%a, %d %b %Y %H:%M", time.gmtime()) +snapshot_version_information = ( + '.. note:: This article was migrated from the wiki (%s) and is now the "master copy" (the version in the wiki will be deleted). It may not be a perfect rendering of the original but we hope to fix that soon!\n\n' + % time.strftime("%a, %d %b %Y %H:%M", time.gmtime()) +) def CleanWiki(): - """Delete the wiki clone directory and all contained files. 
- """ + """Delete the wiki clone directory and all contained files.""" - def errorhandler(func, path, exc_info): - # where func is os.listdir, os.remove, or os.rmdir; path is the argument to that function that caused it to fail; and exc_info is a tuple returned by sys.exc_info() - print(func) - print(path) - print(exc_info) - os.chmod(path, stat.S_IWRITE) - os.remove(path) + def errorhandler(func, path, exc_info): + # where func is os.listdir, os.remove, or os.rmdir; path is the argument to that function that caused it to fail; and exc_info is a tuple returned by sys.exc_info() + print(func) + print(path) + print(exc_info) + os.chmod(path, stat.S_IWRITE) + os.remove(path) - try: - shutil.rmtree(output_dir, ignore_errors=False, onerror=errorhandler) - print('Old wiki clone removed') - except IOError: - print('No directory to clean found') + try: + shutil.rmtree(output_dir, ignore_errors=False, onerror=errorhandler) + print('Old wiki clone removed') + except IOError: + print('No directory to clean found') def CloneWiki(): - """ - Clone the wiki into a temporary location (first cleaning) - """ - # Clean up existing repo - CleanWiki() + """ + Clone the wiki into a temporary location (first cleaning) + """ + # Clean up existing repo + CleanWiki() - # Create directory for output and temporary files - try: - os.makedirs(output_dir) - print('Created directory') - except OSError: - pass + # Create directory for output and temporary files + try: + os.makedirs(output_dir) + print('Created directory') + except OSError: + pass - # Clone - git_clone_command = 'git clone %s %s' % (wiki_repo, wiki_checkout) - print(git_clone_command) - os.system(git_clone_command) + # Clone + git_clone_command = 'git clone %s %s' % (wiki_repo, wiki_checkout) + print(git_clone_command) + os.system(git_clone_command) def ConvertFilesToRst(): - """ - Add template to specified page object (wikitools) - """ - indexfiletext = '============================\nWiki snapshot (ready-for-review)\n============================\n\n%s\n.. toctree::\n :maxdepth: 2\n' % snapshot_version_information - for file in os.listdir(wiki_checkout): - if not file.endswith(".md"): - continue - - inputfilename = wiki_checkout + file - markdown = Path(inputfilename).read_text() - if 'This article has moved from the wiki to the new site' in markdown: - continue - if 'This page has been migrated to the main site' in markdown: - continue - - print(file) - # get name of file - filenamestripped = os.path.splitext(file)[0] - indexfiletext += '\n %s' % filenamestripped - outputfilename = output_dir + filenamestripped + '.rst' - - command = 'pandoc -f markdown -t rst -o "%s" "%s"' % (outputfilename, inputfilename) - print(command) - if os.system(command): - sys.exit(1) - title = filenamestripped.replace('-', ' ') - # print title - logfile.write('title from filename: %s \n' % title) - # add import message to title - title += ' (wiki-import)' - length = len(title) - # print length - headerbar = '' - for _ in range(length): - headerbar += '=' - page_reference = filenamestripped - page_reference_link_text = '.. 
_%s:\n\n' % page_reference - titlebar = page_reference_link_text + headerbar + '\n' + title + '\n' + headerbar + '\n' - textinfile = '' - # Add titlebar to start of the file (from the filename) - textinfile += titlebar - # Add wiki snapshot information - - textinfile += snapshot_version_information - - with open(outputfilename) as infile: - for line in infile: - textinfile += line - - # print textinfile - with open(outputfilename, 'w') as outfile: - outfile.write(textinfile) - - # write the index - with open(output_dir + 'index.rst', 'w') as outfile: - outfile.write(indexfiletext) + """ + Add template to specified page object (wikitools) + """ + indexfiletext = ( + '============================\nWiki snapshot (ready-for-review)\n============================\n\n%s\n.. toctree::\n :maxdepth: 2\n' + % snapshot_version_information + ) + for file in os.listdir(wiki_checkout): + if not file.endswith(".md"): + continue + + inputfilename = wiki_checkout + file + markdown = Path(inputfilename).read_text() + if 'This article has moved from the wiki to the new site' in markdown: + continue + if 'This page has been migrated to the main site' in markdown: + continue + + print(file) + # get name of file + filenamestripped = os.path.splitext(file)[0] + indexfiletext += '\n %s' % filenamestripped + outputfilename = output_dir + filenamestripped + '.rst' + + command = 'pandoc -f markdown -t rst -o "%s" "%s"' % (outputfilename, inputfilename) + print(command) + if os.system(command): + sys.exit(1) + title = filenamestripped.replace('-', ' ') + # print title + logfile.write('title from filename: %s \n' % title) + # add import message to title + title += ' (wiki-import)' + length = len(title) + # print length + headerbar = '' + for _ in range(length): + headerbar += '=' + page_reference = filenamestripped + page_reference_link_text = '.. _%s:\n\n' % page_reference + titlebar = page_reference_link_text + headerbar + '\n' + title + '\n' + headerbar + '\n' + textinfile = '' + # Add titlebar to start of the file (from the filename) + textinfile += titlebar + # Add wiki snapshot information + + textinfile += snapshot_version_information + + with open(outputfilename) as infile: + for line in infile: + textinfile += line + + # print textinfile + with open(outputfilename, 'w') as outfile: + outfile.write(textinfile) + + # write the index + with open(output_dir + 'index.rst', 'w') as outfile: + outfile.write(indexfiletext) def FixupConvertedRstFiles(): - """Add template to specified page object (wikitools) + """Add template to specified page object (wikitools)""" + + def fixInternalWikiLinks(aOldText): + """ + Fixes wiki links in [[linkname]] format by changing this to a document link in current directory. + """ + + def fixwikilinks(matchobj): + # print 'matcobj0: %s' % matchobj.group(0) + # print 'matcobj1: %s' % matchobj.group(1) + linktext = matchobj.group(1) + linktext = linktext.replace(' ', '-') + # linktext = ':doc:`%s`' % linktext + # use reference for linking as allows pages to be moved around + linktext = ':ref:`%s`' % linktext + # print 'linkdoc: %s' % linktext + logfile.write('linkdoc: %s \n' % linktext) + return linktext + + # print 'fixing wiki links' + return re.sub(r'\[\[(.+?)\]\]', fixwikilinks, aOldText) + + def fixWikiCodeMarkupToCodeLinks(aOldText): """ + Links "known" code objects if they are found in wiki markup. 
+ """ + + def fixcodemarkuplinks(matchobj): + # print 'Inline code: %s' % matchobj.group(0) + # print 'matcobj1: %s' % matchobj.group(1) + temp_set_of_codemarkup.add(matchobj.group(0)) + linktext = matchobj.group(1) + if linktext in mapped_wiki_inline_code: + logfile.write('Replace: %s \n' % mapped_wiki_inline_code[linktext]) + return mapped_wiki_inline_code[linktext] + + return matchobj.group(0) # linktext + + # print 'fixing up code markup to code reference' + return re.sub(r'``(.+?)``', fixcodemarkuplinks, aOldText) + + for file in os.listdir(output_dir): + if file.endswith(".rst"): + input_file = output_dir + file + # print input_file + textinfile = '' + with open(input_file) as infile: + for line in infile: + textinfile += line + + # print textinfile + # fix up broken wiki-page links in files + textinfile = fixInternalWikiLinks(textinfile) + + # convert codemarkup to links if possible + textinfile = fixWikiCodeMarkupToCodeLinks(textinfile) + + with open(input_file, 'w') as outfile: + outfile.write(textinfile) - def fixInternalWikiLinks(aOldText): - """ - Fixes wiki links in [[linkname]] format by changing this to a document link in current directory. - """ - def fixwikilinks(matchobj): - # print 'matcobj0: %s' % matchobj.group(0) - # print 'matcobj1: %s' % matchobj.group(1) - linktext = matchobj.group(1) - linktext = linktext.replace(' ', '-') - # linktext = ':doc:`%s`' % linktext - # use reference for linking as allows pages to be moved around - linktext = ':ref:`%s`' % linktext - # print 'linkdoc: %s' % linktext - logfile.write('linkdoc: %s \n' % linktext) - return linktext - # print 'fixing wiki links' - return re.sub(r'\[\[(.+?)\]\]', fixwikilinks, aOldText) - - def fixWikiCodeMarkupToCodeLinks(aOldText): - """ - Links "known" code objects if they are found in wiki markup. - """ - def fixcodemarkuplinks(matchobj): - # print 'Inline code: %s' % matchobj.group(0) - # print 'matcobj1: %s' % matchobj.group(1) - temp_set_of_codemarkup.add(matchobj.group(0)) - linktext = matchobj.group(1) - if linktext in mapped_wiki_inline_code: - logfile.write('Replace: %s \n' % mapped_wiki_inline_code[linktext]) - return mapped_wiki_inline_code[linktext] - - return matchobj.group(0) # linktext - # print 'fixing up code markup to code reference' - return re.sub(r'``(.+?)``', fixcodemarkuplinks, aOldText) - - for file in os.listdir(output_dir): - if file.endswith(".rst"): - input_file = output_dir + file - # print input_file - textinfile = '' - with open(input_file) as infile: - for line in infile: - textinfile += line - - # print textinfile - # fix up broken wiki-page links in files - textinfile = fixInternalWikiLinks(textinfile) - - # convert codemarkup to links if possible - textinfile = fixWikiCodeMarkupToCodeLinks(textinfile) - - with open(input_file, 'w') as outfile: - outfile.write(textinfile) - - logfile.write('\n\nCODE MARKUP THAT WONT BE LINKED (add entry to mapped_wiki_inline_code if one of these need to be linked. The tool get-api-items.py can be used to generate the list of the documented API items. \n') - for item in temp_set_of_codemarkup: - logfile.write('%s\n' % item) + logfile.write( + '\n\nCODE MARKUP THAT WONT BE LINKED (add entry to mapped_wiki_inline_code if one of these need to be linked. The tool get-api-items.py can be used to generate the list of the documented API items. 
\n' + ) + for item in temp_set_of_codemarkup: + logfile.write('%s\n' % item) # parser options def main(): - parser = optparse.OptionParser(version="%prog 0.1.1", usage="Usage: %prog [options] version") - parser.add_option("-c", "--clonewiki", action="store_true", default=False, dest="clonewiki", help="Clean and clone the latest wiki") - options, args = parser.parse_args() + parser = optparse.OptionParser(version="%prog 0.1.1", usage="Usage: %prog [options] version") + parser.add_option( + "-c", "--clonewiki", action="store_true", default=False, dest="clonewiki", help="Clean and clone the latest wiki" + ) + options, args = parser.parse_args() - print('Clone wiki: %s' % options.clonewiki) - if options.clonewiki: - CloneWiki() - # input = raw_input('CHECK ALL files were cloned! (look for "error: unable to create file" )\n') + print('Clone wiki: %s' % options.clonewiki) + if options.clonewiki: + CloneWiki() + # input = raw_input('CHECK ALL files were cloned! (look for "error: unable to create file" )\n') - ConvertFilesToRst() - FixupConvertedRstFiles() - print('See LOG for details: %s ' % logfilename) + ConvertFilesToRst() + FixupConvertedRstFiles() + print('See LOG for details: %s ' % logfilename) if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/system/bin/sdl-config.py b/system/bin/sdl-config.py index e028a4f7397be..9d1bb92a72622 100755 --- a/system/bin/sdl-config.py +++ b/system/bin/sdl-config.py @@ -11,4 +11,3 @@ print('') elif '--version' in args: print('1.3.0') - diff --git a/system/bin/sdl2-config.py b/system/bin/sdl2-config.py index 957ef5900a3ec..bb8266c7dabec 100755 --- a/system/bin/sdl2-config.py +++ b/system/bin/sdl2-config.py @@ -10,4 +10,3 @@ print('-sUSE_SDL=2') elif '--version' in args: print('2.0.10') - diff --git a/test/benchmark/benchmark_sse.py b/test/benchmark/benchmark_sse.py index 65f296c1dafce..e24f60e2e208a 100644 --- a/test/benchmark/benchmark_sse.py +++ b/test/benchmark/benchmark_sse.py @@ -34,161 +34,195 @@ def run_benchmark(benchmark_file, results_file, build_args): - # Run native build - out_file = os.path.join(temp_dir, 'benchmark_sse_native') - if WINDOWS: - out_file += '.exe' - cmd = [CLANG_CXX] + clang_native.get_clang_native_args() + [benchmark_file, '-O3', '-o', out_file] - print('Building native version of the benchmark:') - print(' '.join(cmd)) - run_process(cmd, env=clang_native.get_clang_native_env()) + # Run native build + out_file = os.path.join(temp_dir, 'benchmark_sse_native') + if WINDOWS: + out_file += '.exe' + cmd = [CLANG_CXX] + clang_native.get_clang_native_args() + [benchmark_file, '-O3', '-o', out_file] + print('Building native version of the benchmark:') + print(' '.join(cmd)) + run_process(cmd, env=clang_native.get_clang_native_env()) - native_results = Popen([out_file], stdout=PIPE, stderr=PIPE).communicate() - print(native_results[0]) + native_results = Popen([out_file], stdout=PIPE, stderr=PIPE).communicate() + print(native_results[0]) - # Run emscripten build - out_file = os.path.join(temp_dir, 'benchmark_sse_html.js') - cmd = [EMCC, benchmark_file, '-O3', '-sTOTAL_MEMORY=536870912', '-o', out_file] + build_args - print('Building Emscripten version of the benchmark:') - print(' '.join(cmd)) - run_process(cmd) + # Run emscripten build + out_file = os.path.join(temp_dir, 'benchmark_sse_html.js') + cmd = [EMCC, benchmark_file, '-O3', '-sTOTAL_MEMORY=536870912', '-o', out_file] + build_args + print('Building Emscripten version of the benchmark:') + print(' '.join(cmd)) + run_process(cmd) - cmd = V8_ENGINE + 
['--experimental-wasm-simd', os.path.basename(out_file)] - print(' '.join(cmd)) - old_dir = os.getcwd() - os.chdir(os.path.dirname(out_file)) - wasm_results = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate() - os.chdir(old_dir) + cmd = V8_ENGINE + ['--experimental-wasm-simd', os.path.basename(out_file)] + print(' '.join(cmd)) + old_dir = os.getcwd() + os.chdir(os.path.dirname(out_file)) + wasm_results = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate() + os.chdir(old_dir) - if not wasm_results: - raise Exception('Unable to run benchmark in V8!') + if not wasm_results: + raise Exception('Unable to run benchmark in V8!') - if not wasm_results[0].strip(): - print(wasm_results[1]) - sys.exit(1) + if not wasm_results[0].strip(): + print(wasm_results[1]) + sys.exit(1) - print(wasm_results[0]) + print(wasm_results[0]) - def strip_comments(text): - return re.sub('//.*?\n|/\*.*?\*/', '', text, re.S) # noqa + def strip_comments(text): + return re.sub('//.*?\n|/\*.*?\*/', '', text, re.S) # noqa - benchmark_results = strip_comments(wasm_results[0]) + benchmark_results = strip_comments(wasm_results[0]) - # Strip out unwanted print output. - benchmark_results = benchmark_results[benchmark_results.find('{'):].strip() - if '*************************' in benchmark_results: - benchmark_results = benchmark_results[:benchmark_results.find('*************************')].strip() + # Strip out unwanted print output. + benchmark_results = benchmark_results[benchmark_results.find('{') :].strip() + if '*************************' in benchmark_results: + benchmark_results = benchmark_results[: benchmark_results.find('*************************')].strip() - print(benchmark_results) + print(benchmark_results) - shutil.rmtree(temp_dir) + shutil.rmtree(temp_dir) - native_results = json.loads(native_results[0]) - benchmark_results = benchmark_results[benchmark_results.index('{'):benchmark_results.rindex('}') + 1] - wasm_results = json.loads(benchmark_results) + native_results = json.loads(native_results[0]) + benchmark_results = benchmark_results[benchmark_results.index('{') : benchmark_results.rindex('}') + 1] + wasm_results = json.loads(benchmark_results) - # native_workload = native_results['workload'] - # html_workload = wasm_results['workload'] + # native_workload = native_results['workload'] + # html_workload = wasm_results['workload'] - html = '''

SSE JavaScript Benchmark

+ html = ( + '''

SSE JavaScript Benchmark

System Info:
- ''' + system_info[0].replace('\n', '
') + ''' + ''' + + system_info[0].replace('\n', '
') + + ''' Native Clang Compiler:
- ''' + native_info[1].replace('\n', '
') + ''' + ''' + + native_info[1].replace('\n', '
') + + ''' Emscripten Compiler:
- ''' + emscripten_info[0].replace('\n', '
') - - charts_native = {} - charts_html = {} - for result in native_results['results']: - ch = result['chart'] - if ch not in charts_native: - charts_native[ch] = [] - charts_native[ch] += [result] - for result in wasm_results['results']: - ch = result['chart'] - if ch not in charts_html: - charts_html[ch] = [] - charts_html[ch] += [result] - - def find_result_in_category(results, category): - for result in results: - if result['category'] == category: - return result - return None - - def format_comparison(a, b): - if a < b and a != 0: - return " {:10.2f}".format(b / a) + 'x FASTER' - elif b != 0: - return " {:10.2f}".format(a / b) + 'x SLOWER' - else: - return " NaN " - - chartNumber = 0 - - total_time_native_scalar = 0 - total_time_native_simd = 0 - total_time_html_scalar = 0 - total_time_html_simd = 0 - - for chart_name, chart_native_results in charts_native.items(): - # Extract data for each chart. - categories = [] - nativeScalarResults = [] - nativeSimdResults = [] - htmlScalarResults = [] - htmlSimdResults = [] - native_results = chart_native_results - wasm_results = charts_html[chart_name] - textual_results_native = '

' - textual_results_html = '

' - textual_results_html2 = '

' - textual_results_html3 = '

' - for result in native_results: - categories += ["'" + result['category'] + "'"] - nsc = result['scalar'] - nsi = result['simd'] - nativeScalarResults += [str(nsc)] - nativeSimdResults += [str(nsi)] - html_result = find_result_in_category(wasm_results, result['category']) - textual_results_native += 'Native ' + result['category'] + ': ' + "{:10.4f}".format(nsc) + 'ns -> ' + "{:10.4f}".format(nsi) + 'ns. ' - textual_results_native += 'Native SSE is ' + format_comparison(nsi, nsc) + ' than native scalar.        
' - - if html_result is not None: - hsc = html_result['scalar'] - htmlScalarResults += [str(hsc)] - hsi = html_result['simd'] - htmlSimdResults += [str(hsi)] - textual_results_html += 'JS ' + result['category'] + ': ' + "{:10.4f}".format(hsc) + 'ns -> ' + "{:10.4f}".format(hsi) + 'ns. ' - textual_results_html += 'JS SSE is ' + format_comparison(hsi, hsc) + ' than JS scalar.        
' - textual_results_html2 += 'JS ' + result['category'] + ': JS scalar is ' + format_comparison(hsc, nsc) + ' than native scalar.        
' - textual_results_html3 += 'JS ' + result['category'] + ': JS SSE is ' + format_comparison(hsi, nsi) + ' than native SSE.        
' - total_time_native_scalar += nsc - total_time_native_simd += nsi - total_time_html_scalar += hsc - total_time_html_simd += hsi - else: - htmlScalarResults += [str(-1)] - htmlSimdResults += [str(-1)] - - chartNumber += 1 - html += '

' - html += '''''' + '
' + textual_results_native + '' + textual_results_html + '
' + textual_results_html2 + '' + textual_results_html3 + '
' - - # Final overall score - - html += '
' - html += '''''' + + '
' + + textual_results_native + + '' + + textual_results_html + + '
' + + textual_results_html2 + + '' + + textual_results_html3 + + '
' + ) + + # Final overall score + + html += '
' + html += ( + '''''' + ) - html += '' + html += '' - open(results_file, 'w').write(html) - print('Wrote ' + str(len(html)) + ' bytes to file ' + results_file + '.') + open(results_file, 'w').write(html) + print('Wrote ' + str(len(html)) + ' bytes to file ' + results_file + '.') if __name__ == '__main__': - suite = sys.argv[1].lower() if len(sys.argv) == 2 else None - if suite in ['sse', 'sse1']: - run_benchmark(test_file('sse/benchmark_sse1.cpp'), 'results_sse1.html', ['-msse']) - elif suite == 'sse2': - run_benchmark(test_file('sse/benchmark_sse2.cpp'), 'results_sse2.html', ['-msse2']) - elif suite == 'sse3': - run_benchmark(test_file('sse/benchmark_sse3.cpp'), 'results_sse3.html', ['-msse3']) - elif suite == 'ssse3': - run_benchmark(test_file('sse/benchmark_ssse3.cpp'), 'results_ssse3.html', ['-mssse3']) - else: - raise Exception('Usage: python test/benchmark_sse.py sse1|sse2|sse3') + suite = sys.argv[1].lower() if len(sys.argv) == 2 else None + if suite in ['sse', 'sse1']: + run_benchmark(test_file('sse/benchmark_sse1.cpp'), 'results_sse1.html', ['-msse']) + elif suite == 'sse2': + run_benchmark(test_file('sse/benchmark_sse2.cpp'), 'results_sse2.html', ['-msse2']) + elif suite == 'sse3': + run_benchmark(test_file('sse/benchmark_sse3.cpp'), 'results_sse3.html', ['-msse3']) + elif suite == 'ssse3': + run_benchmark(test_file('sse/benchmark_ssse3.cpp'), 'results_ssse3.html', ['-mssse3']) + else: + raise Exception('Usage: python test/benchmark_sse.py sse1|sse2|sse3') diff --git a/test/clang_native.py b/test/clang_native.py index 1193eb969bf52..027bd5205f2f5 100644 --- a/test/clang_native.py +++ b/test/clang_native.py @@ -15,15 +15,15 @@ def get_native_triple(): arch = { - 'aarch64': 'arm64', - 'arm64': 'arm64', - 'x86_64': 'x86_64', - 'AMD64': 'x86_64', + 'aarch64': 'arm64', + 'arm64': 'arm64', + 'x86_64': 'x86_64', + 'AMD64': 'x86_64', }[platform.machine()] OS = { - 'linux': 'linux', - 'darwin': 'darwin', - 'win32': 'windows-msvc', + 'linux': 'linux', + 'darwin': 'darwin', + 'win32': 'windows-msvc', }[sys.platform] return f'{arch}-{OS}' @@ -78,7 +78,11 @@ def get_clang_native_env(): else: visual_studio_path = 'C:\\Program Files (x86)\\Microsoft Visual Studio 14.0' if not os.path.isdir(visual_studio_path): - raise Exception('Visual Studio 2015 was not found in "' + visual_studio_path + '"! Run in Visual Studio X64 command prompt to avoid the need to autoguess this location (or set VSINSTALLDIR env var).') + raise Exception( + 'Visual Studio 2015 was not found in "' + + visual_studio_path + + '"! Run in Visual Studio X64 command prompt to avoid the need to autoguess this location (or set VSINSTALLDIR env var).' + ) # Guess where Program Files (x86) is located if 'ProgramFiles(x86)' in env: @@ -98,19 +102,31 @@ def get_clang_native_env(): else: windows8_sdk_dir = os.path.join(prog_files_x86, 'Windows Kits', '8.1') if not os.path.isdir(windows8_sdk_dir): - raise Exception('Windows 8.1 SDK was not found in "' + windows8_sdk_dir + '"! Run in Visual Studio command prompt to avoid the need to autoguess this location (or set WindowsSdkDir env var).') + raise Exception( + 'Windows 8.1 SDK was not found in "' + + windows8_sdk_dir + + '"! Run in Visual Studio command prompt to avoid the need to autoguess this location (or set WindowsSdkDir env var).' 
+ ) # Guess where Windows 10 SDK is located if os.path.isdir(os.path.join(prog_files_x86, 'Windows Kits', '10')): windows10_sdk_dir = os.path.join(prog_files_x86, 'Windows Kits', '10') if not os.path.isdir(windows10_sdk_dir): - raise Exception('Windows 10 SDK was not found in "' + windows10_sdk_dir + '"! Run in Visual Studio command prompt to avoid the need to autoguess this location.') + raise Exception( + 'Windows 10 SDK was not found in "' + + windows10_sdk_dir + + '"! Run in Visual Studio command prompt to avoid the need to autoguess this location.' + ) env.setdefault('VSINSTALLDIR', visual_studio_path) env.setdefault('VCINSTALLDIR', os.path.join(visual_studio_path, 'VC')) windows10sdk_kits_include_dir = os.path.join(windows10_sdk_dir, 'Include') - windows10sdk_kit_version_name = [x for x in os.listdir(windows10sdk_kits_include_dir) if os.path.isdir(os.path.join(windows10sdk_kits_include_dir, x))][0] # e.g. "10.0.10150.0" or "10.0.10240.0" + windows10sdk_kit_version_name = [ + x + for x in os.listdir(windows10sdk_kits_include_dir) + if os.path.isdir(os.path.join(windows10sdk_kits_include_dir, x)) + ][0] # e.g. "10.0.10150.0" or "10.0.10240.0" def append_item(key, item): if key not in env or len(env[key].strip()) == 0: diff --git a/test/common.py b/test/common.py index b6e958af77107..e311bfa34e8a4 100644 --- a/test/common.py +++ b/test/common.py @@ -191,6 +191,7 @@ def no_wasm64(note=''): def decorated(f): return skip_if(f, 'is_wasm64', note) + return decorated @@ -206,7 +207,9 @@ def decorated(self, *args, **kwargs): if self.get_setting('INITIAL_MEMORY') == '2200mb': self.skipTest(note) f(self, *args, **kwargs) + return decorated + return decorator @@ -221,7 +224,9 @@ def decorated(self, *args, **kwargs): if self.is_4gb(): self.skipTest(note) f(self, *args, **kwargs) + return decorated + return decorator @@ -328,6 +333,7 @@ def node_pthreads(f): def decorated(self, *args, **kwargs): self.setup_node_pthreads() f(self, *args, **kwargs) + return decorated @@ -366,6 +372,7 @@ def decorated(f): def modified(self, *args, **kwargs): with env_modify(updates): return f(self, *args, **kwargs) + return modified return decorated @@ -383,8 +390,7 @@ def metafunc(self, wasmfs, *args, **kwargs): self.emcc_args.append('-DWASMFS') f(self, *args, **kwargs) - parameterize(metafunc, {'': (False,), - 'wasmfs': (True,)}) + parameterize(metafunc, {'': (False,), 'wasmfs': (True,)}) return metafunc @@ -401,14 +407,12 @@ def metafunc(self, rawfs, *args, **kwargs): self.set_setting('NODERAWFS') func(self, *args, **kwargs) - parameterize(metafunc, {'': (False,), - 'rawfs': (True,)}) + parameterize(metafunc, {'': (False,), 'rawfs': (True,)}) return metafunc # Decorator version of env_modify def also_with_env_modify(name_updates_mapping): - def decorated(f): @wraps(f) def metafunc(self, updates, *args, **kwargs): @@ -443,8 +447,7 @@ def metafunc(self, with_minimal_runtime, *args, **kwargs): self.set_setting('MINIMAL_RUNTIME', 1) f(self, *args, **kwargs) - parameterize(metafunc, {'': (False,), - 'minimal_runtime': (True,)}) + parameterize(metafunc, {'': (False,), 'minimal_runtime': (True,)}) return metafunc @@ -467,8 +470,7 @@ def metafunc(self, with_bigint, *args, **kwargs): else: f(self, *args, **kwargs) - parameterize(metafunc, {'': (False,), - 'bigint': (True,)}) + parameterize(metafunc, {'': (False,), 'bigint': (True,)}) return metafunc @@ -486,8 +488,7 @@ def metafunc(self, with_wasm64, *args, **kwargs): else: f(self, *args, **kwargs) - parameterize(metafunc, {'': (False,), - 'wasm64': (True,)}) + 
parameterize(metafunc, {'': (False,), 'wasm64': (True,)}) return metafunc @@ -506,8 +507,7 @@ def metafunc(self, with_wasm2js, *args, **kwargs): else: f(self, *args, **kwargs) - parameterize(metafunc, {'': (False,), - 'wasm2js': (True,)}) + parameterize(metafunc, {'': (False,), 'wasm2js': (True,)}) return metafunc @@ -520,11 +520,13 @@ def can_do_standalone(self, impure=False): # This is way to detect the core_2gb test mode in test_core.py if self.get_setting('INITIAL_MEMORY') == '2200mb': return False - return self.is_wasm() and \ - self.get_setting('STACK_OVERFLOW_CHECK', 0) < 2 and \ - not self.get_setting('MINIMAL_RUNTIME') and \ - not self.get_setting('SAFE_HEAP') and \ - not any(a.startswith('-fsanitize=') for a in self.emcc_args) + return ( + self.is_wasm() + and self.get_setting('STACK_OVERFLOW_CHECK', 0) < 2 + and not self.get_setting('MINIMAL_RUNTIME') + and not self.get_setting('SAFE_HEAP') + and not any(a.startswith('-fsanitize=') for a in self.emcc_args) + ) # Impure means a test that cannot run in a wasm VM yet, as it is not 100% @@ -555,8 +557,7 @@ def metafunc(self, standalone): self.node_args += shared.node_bigint_flags(nodejs) func(self) - parameterize(metafunc, {'': (False,), - 'standalone': (True,)}) + parameterize(metafunc, {'': (False,), 'standalone': (True,)}) return metafunc return decorated @@ -599,9 +600,7 @@ def metafunc(self, mode, *args, **kwargs): self.set_setting('DEFAULT_TO_CXX') f(self, *args, **kwargs) - parameterize(metafunc, {'emscripten': ('emscripten',), - 'wasm': ('wasm',), - 'wasm_exnref': ('wasm_exnref',)}) + parameterize(metafunc, {'emscripten': ('emscripten',), 'wasm': ('wasm',), 'wasm_exnref': ('wasm_exnref',)}) return metafunc @@ -629,9 +628,7 @@ def metafunc(self, mode, *args, **kwargs): self.set_setting('SUPPORT_LONGJMP', 'emscripten') f(self, *args, **kwargs) - parameterize(metafunc, {'emscripten': ('emscripten',), - 'wasm': ('wasm',), - 'wasm_exnref': ('wasm_exnref',)}) + parameterize(metafunc, {'emscripten': ('emscripten',), 'wasm': ('wasm',), 'wasm_exnref': ('wasm_exnref',)}) return metafunc @@ -653,11 +650,11 @@ def limit_size(string): if len(line) > max_line: lines[i] = line[:max_line] + '[..]' if len(lines) > maxlines: - lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:] + lines = lines[0 : maxlines // 2] + ['[..]'] + lines[-maxlines // 2 :] lines.append('(not all output shown. See `limit_size`)') string = '\n'.join(lines) + '\n' if len(string) > maxbytes: - string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:] + string = string[0 : maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2 :] return string @@ -713,8 +710,7 @@ def force_delete_contents(dirname): def find_browser_test_file(filename): - """Looks for files in test/browser and then in test/ - """ + """Looks for files in test/browser and then in test/""" if not os.path.exists(filename): fullname = test_file('browser', filename) if not os.path.exists(fullname): @@ -734,7 +730,8 @@ def parameterize(func, parameters): if prev: # If we're parameterizing 2nd time, construct a cartesian product for various combinations. 
func._parameterize = { - '_'.join(filter(None, [k1, k2])): v2 + v1 for (k1, v1), (k2, v2) in itertools.product(prev.items(), parameters.items()) + '_'.join(filter(None, [k1, k2])): v2 + v1 + for (k1, v1), (k2, v2) in itertools.product(prev.items(), parameters.items()) } else: func._parameterize = parameters @@ -760,9 +757,11 @@ def test_something_subtest1(self): def test_something_subtest2(self): # runs test_something(4, 5, 6) """ + def decorator(func): parameterize(func, parameters) return func + return decorator @@ -1041,7 +1040,7 @@ def set_temp_dir(self, temp_dir): @classmethod def setUpClass(cls): super().setUpClass() - print('(checking sanity from test runner)') # do this after we set env stuff + print('(checking sanity from test runner)') # do this after we set env stuff shared.check_sanity(force=True) def setUp(self): @@ -1080,7 +1079,7 @@ def setUp(self): emcc_min_node_version = ( int(emcc_min_node_version_str[0:2]), int(emcc_min_node_version_str[2:4]), - int(emcc_min_node_version_str[4:6]) + int(emcc_min_node_version_str[4:6]), ) if node_version < emcc_min_node_version: self.emcc_args += building.get_emcc_node_flags(node_version) @@ -1146,16 +1145,19 @@ def tearDown(self): # They may not be due to us, but e.g. the browser when running browser # tests. Until we figure out a proper solution, ignore some temp file # names that we see on our CI infrastructure. - ignorable_file_prefixes = [ - '/tmp/tmpaddon', - '/tmp/circleci-no-output-timeout', - '/tmp/wasmer' - ] + ignorable_file_prefixes = ['/tmp/tmpaddon', '/tmp/circleci-no-output-timeout', '/tmp/wasmer'] left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run) - left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])] + left_over_files = [ + f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes]) + ] if len(left_over_files): - print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr) + print( + 'ERROR: After running test, there are ' + + str(len(left_over_files)) + + ' new temporary files/directories left behind:', + file=sys.stderr, + ) for f in left_over_files: print('leaked file: ' + f, file=sys.stderr) self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!') @@ -1216,7 +1218,17 @@ def add_on_exit(self, code): # libraries, for example def get_emcc_args(self, main_file=False, compile_only=False, asm_only=False): def is_ldflag(f): - return any(f.startswith(s) for s in ['-sEXPORT_ES6', '-sPROXY_TO_PTHREAD', '-sENVIRONMENT=', '--pre-js=', '--post-js=', '-sPTHREAD_POOL_SIZE=']) + return any( + f.startswith(s) + for s in [ + '-sEXPORT_ES6', + '-sPROXY_TO_PTHREAD', + '-sENVIRONMENT=', + '--pre-js=', + '--post-js=', + '-sPTHREAD_POOL_SIZE=', + ] + ) args = self.serialize_settings(compile_only or asm_only) + self.emcc_args if asm_only: @@ -1255,7 +1267,9 @@ def verify_es5(self, filename): self.fail('es-check failed to verify ES5 output compliance') # Build JavaScript code from source code - def build(self, filename, libraries=None, includes=None, force_c=False, js_outfile=True, emcc_args=None, output_basename=None): + def build( + self, filename, libraries=None, includes=None, force_c=False, js_outfile=True, emcc_args=None, output_basename=None + ): if not os.path.exists(filename): filename = test_file(filename) suffix = '.js' if js_outfile else '.wasm' @@ -1293,7 +1307,7 @@ def get_func(self, src, name): 
elif src[t] == '}': n -= 1 if n == 0: - return src[start:t + 1] + return src[start : t + 1] t += 1 assert t < len(src) @@ -1314,7 +1328,9 @@ def count_funcs(self, javascript_file): return num_funcs def count_wasm_contents(self, wasm_binary, what): - out = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout + out = self.run_process( + [os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE + ).stdout # output is something like # [?] : 125 for line in out.splitlines(): @@ -1363,9 +1379,7 @@ def cleanup(line): assert len(long_lines) == 1 return '\n'.join(lines) - def run_js(self, filename, engine=None, args=None, - assert_returncode=0, - interleaved_output=True): + def run_js(self, filename, engine=None, args=None, assert_returncode=0, interleaved_output=True): # use files, as PIPE can get too full and hang us stdout_file = self.in_dir('stdout') stderr_file = None @@ -1384,10 +1398,7 @@ def run_js(self, filename, engine=None, args=None, if engine == config.V8_ENGINE: engine = engine + self.v8_args try: - jsrun.run_js(filename, engine, args, - stdout=stdout, - stderr=stderr, - assert_returncode=assert_returncode) + jsrun.run_js(filename, engine, args, stdout=stdout, stderr=stderr, assert_returncode=assert_returncode) except subprocess.TimeoutExpired as e: timeout_error = e except subprocess.CalledProcessError as e: @@ -1421,7 +1432,10 @@ def run_js(self, filename, engine=None, args=None, if assert_returncode == NON_ZERO: self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret)) else: - self.fail('JS subprocess failed (%s): %s (expected=%s). Output:\n%s' % (error.cmd, error.returncode, assert_returncode, ret)) + self.fail( + 'JS subprocess failed (%s): %s (expected=%s). Output:\n%s' + % (error.cmd, error.returncode, assert_returncode, ret) + ) # We should pass all strict mode checks self.assertNotContained('strict warning:', ret) @@ -1445,21 +1459,18 @@ def assertPathsIdentical(self, path1, path2): # Tests that the given two multiline text content are identical, modulo line # ending differences (\r\n on Windows, \n on Unix). 
- def assertTextDataIdentical(self, text1, text2, msg=None, - fromfile='expected', tofile='actual'): + def assertTextDataIdentical(self, text1, text2, msg=None, fromfile='expected', tofile='actual'): text1 = text1.replace('\r\n', '\n') text2 = text2.replace('\r\n', '\n') return self.assertIdentical(text1, text2, msg, fromfile, tofile) - def assertIdentical(self, values, y, msg=None, - fromfile='expected', tofile='actual'): + def assertIdentical(self, values, y, msg=None, fromfile='expected', tofile='actual'): if type(values) not in (list, tuple): values = [values] for x in values: if x == y: - return # success - diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(), - fromfile=fromfile, tofile=tofile) + return # success + diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(), fromfile=fromfile, tofile=tofile) diff = ''.join([a.rstrip() + '\n' for a in diff_lines]) if EMTEST_VERBOSE: print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y))) @@ -1487,12 +1498,10 @@ def assertFileContents(self, filename, contents): return if not os.path.exists(filename): - self.fail('Test expectation file not found: ' + filename + '.\n' + - 'Run with --rebaseline to generate.') + self.fail('Test expectation file not found: ' + filename + '.\n' + 'Run with --rebaseline to generate.') expected_content = read_file(filename) message = "Run with --rebaseline to automatically update expectations" - self.assertTextDataIdentical(expected_content, contents, message, - filename, filename + '.new') + self.assertTextDataIdentical(expected_content, contents, message, filename, filename + '.new') def assertContained(self, values, string, additional_info='', regex=False): if callable(string): @@ -1512,14 +1521,14 @@ def assertContained(self, values, string, additional_info='', regex=False): if not any(v in string for v in values): diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual') diff = ''.join(a.rstrip() + '\n' for a in diff) - self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % ( - limit_size(values[0]), limit_size(string), limit_size(diff), - additional_info - )) + self.fail( + "Expected to find '%s' in '%s', diff:\n\n%s\n%s" + % (limit_size(values[0]), limit_size(string), limit_size(diff), additional_info) + ) def assertNotContained(self, value, string): if callable(value): - value = value() # lazy loading + value = value() # lazy loading if callable(string): string = string() if value in string: @@ -1532,10 +1541,8 @@ def assertContainedIf(self, value, string, condition): self.assertNotContained(value, string) def assertBinaryEqual(self, file1, file2): - self.assertEqual(os.path.getsize(file1), - os.path.getsize(file2)) - self.assertEqual(read_binary(file1), - read_binary(file2)) + self.assertEqual(os.path.getsize(file1), os.path.getsize(file2)) + self.assertEqual(read_binary(file1), read_binary(file2)) library_cache: Dict[str, Tuple[str, object]] = {} @@ -1544,10 +1551,19 @@ def get_build_dir(self): ensure_dir(ret) return ret - def get_library(self, name, generated_libs, configure=['sh', './configure'], # noqa - configure_args=None, make=None, make_args=None, - env_init=None, cache_name_extra='', native=False, - force_rebuild=False): + def get_library( + self, + name, + generated_libs, + configure=['sh', './configure'], # noqa + configure_args=None, + make=None, + make_args=None, + env_init=None, + cache_name_extra='', + native=False, + force_rebuild=False, + ): if make is None: make = ['make'] if env_init 
is None: @@ -1566,7 +1582,13 @@ def get_library(self, name, generated_libs, configure=['sh', './configure'], # emcc_args = self.get_emcc_args(compile_only=True) hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8') - cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra + cache_name = ( + name + + ','.join([opt for opt in emcc_args if len(opt) < 7]) + + '_' + + hashlib.md5(hash_input).hexdigest() + + cache_name_extra + ) valid_chars = "_%s%s" % (string.ascii_letters, string.digits) cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name]) @@ -1589,9 +1611,19 @@ def get_library(self, name, generated_libs, configure=['sh', './configure'], # cflags = ' '.join(emcc_args) env_init.setdefault('CFLAGS', cflags) env_init.setdefault('CXXFLAGS', cflags) - return build_library(name, build_dir, output_dir, generated_libs, configure, - make, make_args, self.library_cache, - cache_name, env_init=env_init, native=native) + return build_library( + name, + build_dir, + output_dir, + generated_libs, + configure, + make, + make_args, + self.library_cache, + cache_name, + env_init=env_init, + native=native, + ) def clear(self): force_delete_contents(self.get_dir()) @@ -1650,7 +1682,9 @@ def expect_fail(self, cmd, expect_traceback=False, **args): # when run under browser it excercises how dynamic linker handles concurrency # - because B and C are loaded in parallel. def _test_dylink_dso_needed(self, do_run): - create_file('liba.cpp', r''' + create_file( + 'liba.cpp', + r''' #include #include @@ -1672,9 +1706,12 @@ def _test_dylink_dso_needed(self, do_run): }; static ainit _; - ''') + ''', + ) - create_file('libb.c', r''' + create_file( + 'libb.c', + r''' #include void afunc(const char *s); @@ -1682,9 +1719,12 @@ def _test_dylink_dso_needed(self, do_run): EMSCRIPTEN_KEEPALIVE void bfunc() { afunc("b"); } - ''') + ''', + ) - create_file('libc.c', r''' + create_file( + 'libc.c', + r''' #include void afunc(const char *s); @@ -1692,7 +1732,8 @@ def _test_dylink_dso_needed(self, do_run): EMSCRIPTEN_KEEPALIVE void cfunc() { afunc("c"); } - ''') + ''', + ) # _test_dylink_dso_needed can be potentially called several times by a test. # reset dylink-related options first. @@ -1717,7 +1758,8 @@ def ccshared(src, linkto=None): self.set_setting('MAIN_MODULE') extra_args = ['-L.', 'libb' + so, 'libc' + so] - do_run(r''' + do_run( + r''' #ifdef __cplusplus extern "C" { #endif @@ -1733,12 +1775,15 @@ def ccshared(src, linkto=None): return 0; } ''', - 'a: loaded\na: b (prev: (null))\na: c (prev: b)\n', emcc_args=extra_args) + 'a: loaded\na: b (prev: (null))\na: c (prev: b)\n', + emcc_args=extra_args, + ) extra_args = [] for libname in ('liba', 'libb', 'libc'): extra_args += ['--embed-file', libname + so] - do_run(r''' + do_run( + r''' #include #include #include @@ -1762,8 +1807,11 @@ def ccshared(src, linkto=None): cfunc_ptr(); return 0; } - ''' % locals(), - 'a: loaded\na: b (prev: (null))\na: c (prev: b)\n', emcc_args=extra_args) + ''' + % locals(), + 'a: loaded\na: b (prev: (null))\na: c (prev: b)\n', + emcc_args=extra_args, + ) def do_run(self, src, expected_output=None, force_c=False, **kwargs): if 'no_build' in kwargs: @@ -1794,23 +1842,37 @@ def do_run_in_out_file_test(self, srcfile, **kwargs): return output ## Does a complete test - builds, runs, checks output, etc. 
- def _build_and_run(self, filename, expected_output, args=None, - no_build=False, - libraries=None, - includes=None, - assert_returncode=0, assert_identical=False, assert_all=False, - check_for_error=True, force_c=False, emcc_args=None, - interleaved_output=True, - regex=False, - output_basename=None): + def _build_and_run( + self, + filename, + expected_output, + args=None, + no_build=False, + libraries=None, + includes=None, + assert_returncode=0, + assert_identical=False, + assert_all=False, + check_for_error=True, + force_c=False, + emcc_args=None, + interleaved_output=True, + regex=False, + output_basename=None, + ): logger.debug(f'_build_and_run: {filename}') if no_build: js_file = filename else: - js_file = self.build(filename, libraries=libraries, includes=includes, - force_c=force_c, emcc_args=emcc_args, - output_basename=output_basename) + js_file = self.build( + filename, + libraries=libraries, + includes=includes, + force_c=force_c, + emcc_args=emcc_args, + output_basename=output_basename, + ) self.assertExists(js_file) engines = self.js_engines.copy() @@ -1826,9 +1888,9 @@ def _build_and_run(self, filename, expected_output, args=None, if len(engines) == 0: self.fail('No JS engine present to run this test with. Check %s and the paths therein.' % config.EM_CONFIG) for engine in engines: - js_output = self.run_js(js_file, engine, args, - assert_returncode=assert_returncode, - interleaved_output=interleaved_output) + js_output = self.run_js( + js_file, engine, args, assert_returncode=assert_returncode, interleaved_output=interleaved_output + ) js_output = js_output.replace('\r\n', '\n') if expected_output: if type(expected_output) not in [list, tuple]: @@ -1858,16 +1920,18 @@ def get_freetype_library(self): # And because gnu-offsetof-extensions is a new warning: '-Wno-unknown-warning-option', ] - return self.get_library(os.path.join('third_party', 'freetype'), - os.path.join('objs', '.libs', 'libfreetype.a'), - configure_args=['--disable-shared', '--without-zlib']) + return self.get_library( + os.path.join('third_party', 'freetype'), + os.path.join('objs', '.libs', 'libfreetype.a'), + configure_args=['--disable-shared', '--without-zlib'], + ) def get_poppler_library(self, env_init=None): freetype = self.get_freetype_library() self.emcc_args += [ '-I' + test_file('third_party/freetype/include'), - '-I' + test_file('third_party/poppler/include') + '-I' + test_file('third_party/poppler/include'), ] # Poppler has some pretty glaring warning. 
Suppress them to keep the @@ -1893,10 +1957,24 @@ def get_poppler_library(self, env_init=None): env_init['FONTCONFIG_LIBS'] = ' ' poppler = self.get_library( - os.path.join('third_party', 'poppler'), - [os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')], - env_init=env_init, - configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared']) + os.path.join('third_party', 'poppler'), + [ + os.path.join('utils', 'pdftoppm.o'), + os.path.join('utils', 'parseargs.o'), + os.path.join('poppler', '.libs', 'libpoppler.a'), + ], + env_init=env_init, + configure_args=[ + '--disable-libjpeg', + '--disable-libpng', + '--disable-poppler-qt', + '--disable-poppler-qt4', + '--disable-cms', + '--disable-cairo-output', + '--disable-abiword-output', + '--disable-shared', + ], + ) return poppler + freetype @@ -1912,10 +1990,13 @@ def get_zlib_library(self, cmake): # https://github.com/emscripten-core/emscripten/issues/16908 is fixed self.emcc_args.append('-Wno-pointer-sign') if cmake: - rtn = self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), - configure=['cmake', '.'], - make=['cmake', '--build', '.', '--'], - make_args=[]) + rtn = self.get_library( + os.path.join('third_party', 'zlib'), + os.path.join('libz.a'), + configure=['cmake', '.'], + make=['cmake', '--build', '.', '--'], + make_args=[], + ) else: rtn = self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a']) self.emcc_args = old_args @@ -2051,7 +2132,7 @@ def log_request(code=0, size=0): SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm' httpd = HTTPServer(('localhost', port), TestServerHandler) - httpd.serve_forever() # test runner will kill us + httpd.serve_forever() # test runner will kill us class Reporting(Enum): @@ -2059,6 +2140,7 @@ class Reporting(Enum): code for reporting results back to the browser. This enum allows tests to decide what type of support code they need/want. 
""" + NONE = 0 # Include the JS helpers for reporting results JS_ONLY = 1 @@ -2113,7 +2195,9 @@ def setUpClass(cls): return cls.harness_in_queue = multiprocessing.Queue() cls.harness_out_queue = multiprocessing.Queue() - cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.PORT)) + cls.harness_server = multiprocessing.Process( + target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.PORT) + ) cls.harness_server.start() print('[Browser harness server on process %d]' % cls.harness_server.pid) cls.browser_open(cls.HARNESS_URL) @@ -2157,10 +2241,7 @@ def run_browser(self, html_file, expected=None, message=None, timeout=None, extr assert not (message and expected), 'run_browser expects `expected` or `message`, but not both' if expected is not None: try: - self.harness_in_queue.put(( - 'http://localhost:%s/%s' % (self.PORT, html_file), - self.get_dir() - )) + self.harness_in_queue.put(('http://localhost:%s/%s' % (self.PORT, html_file), self.get_dir())) if timeout is None: timeout = self.BROWSER_TIMEOUT try: @@ -2177,7 +2258,7 @@ def run_browser(self, html_file, expected=None, message=None, timeout=None, extr # us to also fail the test self.fail('browser harness error') if output.startswith('/report_result?skipped:'): - self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip()) + self.skipTest(unquote(output[len('/report_result?skipped:') :]).strip()) else: # verify the result, and try again if we should do so output = unquote(output) @@ -2191,7 +2272,7 @@ def run_browser(self, html_file, expected=None, message=None, timeout=None, extr else: raise e finally: - time.sleep(0.1) # see comment about Windows above + time.sleep(0.1) # see comment about Windows above self.assert_out_queue_empty('this test') else: webbrowser.open_new(os.path.abspath(html_file)) @@ -2212,8 +2293,11 @@ def compile_btest(self, filename, args, reporting=Reporting.FULL): if reporting == Reporting.FULL: # If C reporting (i.e. the REPORT_RESULT macro) is required we # also include report_result.c and force-include report_result.h - self.run_process([EMCC, '-c', '-I' + TEST_ROOT, - test_file('report_result.c')] + self.get_emcc_args(compile_only=True) + (['-fPIC'] if '-fPIC' in args else [])) + self.run_process( + [EMCC, '-c', '-I' + TEST_ROOT, test_file('report_result.c')] + + self.get_emcc_args(compile_only=True) + + (['-fPIC'] if '-fPIC' in args else []) + ) args += ['report_result.o', '-include', test_file('report_result.h')] if EMTEST_BROWSER == 'node': args.append('-DEMTEST_NODE') @@ -2235,12 +2319,18 @@ def btest_exit(self, filename, assert_returncode=0, *args, **kwargs): kwargs['expected'] = 'exit:%d' % assert_returncode return self.btest(filename, *args, **kwargs) - def btest(self, filename, expected=None, - post_build=None, - args=None, url_suffix='', timeout=None, - extra_tries=1, - reporting=Reporting.FULL, - output_basename='test'): + def btest( + self, + filename, + expected=None, + post_build=None, + args=None, + url_suffix='', + timeout=None, + extra_tries=1, + reporting=Reporting.FULL, + output_basename='test', + ): assert expected, 'a btest must have an expected output' if args is None: args = [] @@ -2267,23 +2357,20 @@ def btest(self, filename, expected=None, output = self.run_js('test.js') self.assertContained('RESULT: ' + expected[0], output) else: - self.run_browser(outfile + url_suffix, expected=['/report_result?' 
+ e for e in expected], timeout=timeout, extra_tries=extra_tries) + self.run_browser( + outfile + url_suffix, + expected=['/report_result?' + e for e in expected], + timeout=timeout, + extra_tries=extra_tries, + ) ################################################################################################### -def build_library(name, - build_dir, - output_dir, - generated_libs, - configure, - make, - make_args, - cache, - cache_name, - env_init, - native): +def build_library( + name, build_dir, output_dir, generated_libs, configure, make, make_args, cache, cache_name, env_init, native +): """Build a library and cache the result. We build the library file once and cache it for all our tests. (We cache in memory since the test directory is destroyed and recreated for each test. Note that we cache @@ -2326,8 +2413,7 @@ def build_library(name, with open(os.path.join(project_dir, 'configure_err'), 'w') as err: stdout = out if EMTEST_BUILD_VERBOSE < 2 else None stderr = err if EMTEST_BUILD_VERBOSE < 1 else None - shared.run_process(configure, env=env, stdout=stdout, stderr=stderr, - cwd=project_dir) + shared.run_process(configure, env=env, stdout=stdout, stderr=stderr, cwd=project_dir) except subprocess.CalledProcessError: print('-- configure stdout --') print(read_file(Path(project_dir, 'configure_out'))) @@ -2355,8 +2441,7 @@ def open_make_err(mode='r'): with open_make_err('w') as make_err: stdout = make_out if EMTEST_BUILD_VERBOSE < 2 else None stderr = make_err if EMTEST_BUILD_VERBOSE < 1 else None - shared.run_process(make + make_args, stdout=stdout, stderr=stderr, env=env, - cwd=project_dir) + shared.run_process(make + make_args, stdout=stdout, stderr=stderr, env=env, cwd=project_dir) except subprocess.CalledProcessError: with open_make_out() as f: print('-- make stdout --') diff --git a/test/gen_large_switchcase.py b/test/gen_large_switchcase.py index e9f5cef0825f2..e8883a903108f 100755 --- a/test/gen_large_switchcase.py +++ b/test/gen_large_switchcase.py @@ -15,7 +15,8 @@ i += incr incr = (incr % 5) + 1 -print('''#include +print( + '''#include #include #include @@ -23,14 +24,19 @@ { switch(x) { -''' + cases + ''' +''' + + cases + + ''' default: return "default"; } } int main() { - for(int i = 0; i < ''' + str((num_cases + 99) // 100) + '''; ++i) + for(int i = 0; i < ''' + + str((num_cases + 99) // 100) + + '''; ++i) printf("%d: %s\\n", i*301, foo(i*301)); printf("Success!\\n"); -}''') +}''' +) diff --git a/test/gen_many_js_functions.py b/test/gen_many_js_functions.py index dccc74aca2b29..80138cf448796 100644 --- a/test/gen_many_js_functions.py +++ b/test/gen_many_js_functions.py @@ -11,7 +11,10 @@ def func_name(i): - return 'thisIsAFunctionWithVeryLongFunctionNameThatWouldBeGreatToBeMinifiedWhenImportingToAsmJsOrWasmSideCodeToCallOtherwiseCodeSizesWillBeLargeAndNetworkTransfersBecomeVerySlowThatUsersWillGoAwayAndVisitSomeOtherSiteInsteadAndThenWebAssemblyDeveloperIsSadOrEvenWorseNobodyNoticesButInternetPipesWillGetMoreCongestedWhichContributesToGlobalWarmingAndThenEveryoneElseWillBeSadAsWellEspeciallyThePolarBearsAndPenguinsJustThinkAboutThePenguins' + str(i + 1) + return ( + 
'thisIsAFunctionWithVeryLongFunctionNameThatWouldBeGreatToBeMinifiedWhenImportingToAsmJsOrWasmSideCodeToCallOtherwiseCodeSizesWillBeLargeAndNetworkTransfersBecomeVerySlowThatUsersWillGoAwayAndVisitSomeOtherSiteInsteadAndThenWebAssemblyDeveloperIsSadOrEvenWorseNobodyNoticesButInternetPipesWillGetMoreCongestedWhichContributesToGlobalWarmingAndThenEveryoneElseWillBeSadAsWellEspeciallyThePolarBearsAndPenguinsJustThinkAboutThePenguins' + + str(i + 1) + ) def generate_js_library_with_lots_of_functions(out_file): @@ -38,7 +41,13 @@ def generate_c_program_that_calls_js_library_with_lots_of_functions(out_file): for i in range(NUM_FUNCS_TO_GENERATE): f.write(' sum += ' + func_name(i) + '();\n') - f.write('\n printf("Sum of numbers from 1 to ' + str(NUM_FUNCS_TO_GENERATE) + ': %d (expected ' + str(int((NUM_FUNCS_TO_GENERATE * (NUM_FUNCS_TO_GENERATE + 1)) / 2)) + ')\\n", sum);\n') + f.write( + '\n printf("Sum of numbers from 1 to ' + + str(NUM_FUNCS_TO_GENERATE) + + ': %d (expected ' + + str(int((NUM_FUNCS_TO_GENERATE * (NUM_FUNCS_TO_GENERATE + 1)) / 2)) + + ')\\n", sum);\n' + ) f.write('}\n') diff --git a/test/jsrun.py b/test/jsrun.py index b918becaed8e0..8f8a58d8e783a 100644 --- a/test/jsrun.py +++ b/test/jsrun.py @@ -12,7 +12,7 @@ from tools import shared, utils -WORKING_ENGINES = {} # Holds all configured engines and whether they work: maps path -> True/False +WORKING_ENGINES = {} # Holds all configured engines and whether they work: maps path -> True/False DEFAULT_TIMEOUT = 5 * 60 @@ -83,10 +83,19 @@ def require_engine(engine): sys.exit(1) -def run_js(filename, engine, args=None, - stdin=None, stdout=PIPE, stderr=None, cwd=None, - full_output=False, assert_returncode=0, skip_check=False, - timeout=DEFAULT_TIMEOUT): +def run_js( + filename, + engine, + args=None, + stdin=None, + stdout=PIPE, + stderr=None, + cwd=None, + full_output=False, + assert_returncode=0, + skip_check=False, + timeout=DEFAULT_TIMEOUT, +): """Execute javascript code generated by tests, with possible timeout.""" # We used to support True here but we no longer do. Assert here just in case. @@ -100,13 +109,8 @@ def run_js(filename, engine, args=None, print(f"Running: '{shared.shlex_join(command)}'") try: proc = subprocess.run( - command, - stdin=stdin, - stdout=stdout, - stderr=stderr, - cwd=cwd, - timeout=timeout, - universal_newlines=True) + command, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd, timeout=timeout, universal_newlines=True + ) except Exception: # the failure may be because the engine is not present. 
show the proper # error in that case diff --git a/test/other/ports/external.py b/test/other/ports/external.py index 5fb8c8d764f4f..2c5f57915bbad 100644 --- a/test/other/ports/external.py +++ b/test/other/ports/external.py @@ -18,12 +18,7 @@ } # user options (from --use-port) -opts: Dict[str, Optional[str]] = { - 'value1': None, - 'value2': None, - 'value3': "v3", - 'dependency': None -} +opts: Dict[str, Optional[str]] = {'value1': None, 'value2': None, 'value3': "v3", 'dependency': None} deps = ['sdl2_image:formats=jpg'] diff --git a/test/parallel_testsuite.py b/test/parallel_testsuite.py index 05f3717f47f83..013d0490835cc 100644 --- a/test/parallel_testsuite.py +++ b/test/parallel_testsuite.py @@ -91,11 +91,12 @@ def combine_results(self, result, buffered_results): return result -class BufferedParallelTestResult(): +class BufferedParallelTestResult: """A picklable struct used to communicate test results across processes Fulfills the interface for unittest.TestResult """ + def __init__(self): self.buffered_result = None @@ -145,8 +146,9 @@ def addError(self, test, err): self.buffered_result = BufferedTestError(test, err) -class BufferedTestBase(): +class BufferedTestBase: """Abstract class that holds test result data, split by type of result.""" + def __init__(self, test, err=None): self.test = test if err: @@ -181,6 +183,7 @@ def fixup_fake_exception(fake_exc): # the data def make_wrapper(rtn): return lambda: rtn + ex.tb_frame.f_code.co_positions = make_wrapper(ex.tb_frame.f_code.positions) ex = ex.tb_next @@ -209,7 +212,7 @@ def updateResult(self, result): result.addUnexpectedSuccess(self.test) -class FakeTraceback(): +class FakeTraceback: """A fake version of a traceback object that is picklable across processes. Python's traceback objects contain hidden stack information that isn't able @@ -228,14 +231,14 @@ def __init__(self, tb): self.tb_lasti = tb.tb_lasti -class FakeFrame(): +class FakeFrame: def __init__(self, f): self.f_code = FakeCode(f.f_code) # f.f_globals is not picklable, not used in stack traces, and needs to be iterable self.f_globals = [] -class FakeCode(): +class FakeCode: def __init__(self, co): self.co_filename = co.co_filename self.co_name = co.co_name diff --git a/test/runner.py b/test/runner.py index 5896fe4f15b9e..5ae1bacbafca6 100755 --- a/test/runner.py +++ b/test/runner.py @@ -135,7 +135,7 @@ def get_all_tests(modules): def get_crossplatform_tests(modules): - suites = ['core0', 'other', 'sanity'] # We don't need all versions of every test + suites = ['core0', 'other', 'sanity'] # We don't need all versions of every test crossplatform_tests = [] # Walk over the test suites and find the test functions with the # is_crossplatform_test attribute applied by @crossplatform decorator @@ -248,15 +248,18 @@ def print_random_test_statistics(num_tests): std = 0.5 / math.sqrt(num_tests) expected = 100.0 * (1.0 - std) print() - print('running those %d randomly-selected tests. if they all pass, then there is a ' - 'greater than 95%% chance that at least %.2f%% of the test suite will pass' - % (num_tests, expected)) + print( + 'running those %d randomly-selected tests. 
if they all pass, then there is a ' + 'greater than 95%% chance that at least %.2f%% of the test suite will pass' % (num_tests, expected) + ) print() def show(): - print('if all tests passed then there is a greater than 95%% chance that at least ' - '%.2f%% of the test suite will pass' - % (expected)) + print( + 'if all tests passed then there is a greater than 95%% chance that at least ' + '%.2f%% of the test suite will pass' % (expected) + ) + atexit.register(show) @@ -314,6 +317,7 @@ def flattened_tests(loaded_tests): tests.extend(subsuite) return tests + def suite_for_module(module, tests): suite_supported = module.__name__ in ('test_core', 'test_other', 'test_posixtest') if not common.EMTEST_SAVE_DIR and not shared.DEBUG: @@ -338,9 +342,9 @@ def run_tests(options, suites): os.makedirs('out', exist_ok=True) # output fd must remain open until after testRunner.run() below output = open('out/test-results.xml', 'wb') - import xmlrunner # type: ignore - testRunner = xmlrunner.XMLTestRunner(output=output, verbosity=2, - failfast=options.failfast) + import xmlrunner # type: ignore + + testRunner = xmlrunner.XMLTestRunner(output=output, verbosity=2, failfast=options.failfast) print('Writing XML test output to ' + os.path.abspath(output.name)) else: testRunner = unittest.TextTestRunner(verbosity=2, failfast=options.failfast) @@ -348,8 +352,13 @@ def run_tests(options, suites): for mod_name, suite in suites: print('Running %s: (%s tests)' % (mod_name, suite.countTestCases())) res = testRunner.run(suite) - msg = ('%s: %s run, %s errors, %s failures, %s skipped' % - (mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped))) + msg = '%s: %s run, %s errors, %s failures, %s skipped' % ( + mod_name, + res.testsRun, + len(res.errors), + len(res.failures), + len(res.skipped), + ) num_failures += len(res.errors) + len(res.failures) + len(res.unexpectedSuccesses) resultMessages.append(msg) @@ -365,32 +374,41 @@ def run_tests(options, suites): def parse_args(args): parser = argparse.ArgumentParser(prog='runner.py', description=__doc__) - parser.add_argument('--save-dir', action='store_true', - help='Save the temporary directory used during for each ' - 'test. Implies --cores=1. Defaults to true when running a single test') - parser.add_argument('--no-clean', action='store_true', - help='Do not clean the temporary directory before each test run') + parser.add_argument( + '--save-dir', + action='store_true', + help='Save the temporary directory used during for each ' + 'test. Implies --cores=1. Defaults to true when running a single test', + ) + parser.add_argument( + '--no-clean', action='store_true', help='Do not clean the temporary directory before each test run' + ) parser.add_argument('--verbose', '-v', action='store_true') parser.add_argument('--all-engines', action='store_true') parser.add_argument('--detect-leaks', action='store_true') parser.add_argument('--skip-slow', action='store_true', help='Skip tests marked as slow') - parser.add_argument('--cores', '-j', - help='Set the number tests to run in parallel. Defaults ' - 'to the number of CPU cores.', default=None) - parser.add_argument('--rebaseline', action='store_true', - help='Automatically update test expectations for tests that support it.') - parser.add_argument('--browser', - help='Command to launch web browser in which to run browser tests.') + parser.add_argument( + '--cores', + '-j', + help='Set the number tests to run in parallel. 
Defaults ' 'to the number of CPU cores.', + default=None, + ) + parser.add_argument( + '--rebaseline', action='store_true', help='Automatically update test expectations for tests that support it.' + ) + parser.add_argument('--browser', help='Command to launch web browser in which to run browser tests.') parser.add_argument('tests', nargs='*') parser.add_argument('--failfast', action='store_true') parser.add_argument('--start-at', metavar='NAME', help='Skip all tests up until ') - parser.add_argument('--continue', dest='_continue', action='store_true', - help='Resume from the last run test.' - 'Useful when combined with --failfast') + parser.add_argument( + '--continue', + dest='_continue', + action='store_true', + help='Resume from the last run test.' 'Useful when combined with --failfast', + ) parser.add_argument('--force64', action='store_true') parser.add_argument('--crossplatform-only', action='store_true') - parser.add_argument('--repeat', type=int, default=1, - help='Repeat each test N times (default: 1).') + parser.add_argument('--repeat', type=int, default=1, help='Repeat each test N times (default: 1).') return parser.parse_args() diff --git a/test/test_benchmark.py b/test/test_benchmark.py index c0c615fc138e5..bdca3c8138a20 100644 --- a/test/test_benchmark.py +++ b/test/test_benchmark.py @@ -56,7 +56,7 @@ EMTEST_BENCHMARKERS = os.getenv('EMTEST_BENCHMARKERS', 'clang,v8,v8-lto,v8-ctors') -class Benchmarker(): +class Benchmarker: # Whether to record statistics. Set by SizeBenchmarker. record_stats = False @@ -78,7 +78,7 @@ def bench(self, args, output_parser=None, reps=TEST_REPS, expected_output=None): if expected_output is not None and expected_output not in output: raise ValueError('Incorrect benchmark output:\n' + output) - if not output_parser or args == ['0']: # if arg is 0, we are not running code, and have no output to parse + if not output_parser or args == ['0']: # if arg is 0, we are not running code, and have no output to parse curr = time.time() - start else: try: @@ -101,11 +101,15 @@ def display(self, baseline=None): sorted_times = sorted(self.times) count = len(sorted_times) if count % 2 == 0: - median = sum(sorted_times[count // 2 - 1:count // 2 + 1]) / 2 + median = sum(sorted_times[count // 2 - 1 : count // 2 + 1]) / 2 else: median = sorted_times[count // 2] - print(' %10s: mean: %4.3f (+-%4.3f) secs median: %4.3f range: %4.3f-%4.3f (noise: %4.3f%%) (%d runs)' % (self.name, mean, std, median, min(self.times), max(self.times), 100 * std / mean, self.reps), end=' ') + print( + ' %10s: mean: %4.3f (+-%4.3f) secs median: %4.3f range: %4.3f-%4.3f (noise: %4.3f%%) (%d runs)' + % (self.name, mean, std, median, min(self.times), max(self.times), 100 * std / mean, self.reps), + end=' ', + ) if baseline: mean_baseline = sum(baseline.times) / len(baseline.times) @@ -119,14 +123,18 @@ def display(self, baseline=None): recorded_stats = [] def add_stat(name, size, gzip_size): - recorded_stats.append({ - 'value': name, - 'measurement': size, - }) - recorded_stats.append({ - 'value': name + ' (gzipped)', - 'measurement': gzip_size, - }) + recorded_stats.append( + { + 'value': name, + 'measurement': size, + } + ) + recorded_stats.append( + { + 'value': name + ' (gzipped)', + 'measurement': gzip_size, + } + ) total_size = 0 total_gzip_size = 0 @@ -160,7 +168,9 @@ def __init__(self, name, cc, cxx, args=None): self.cxx = cxx self.args = args or [OPTIMIZATIONS] - def build(self, parent, filename, args, shared_args, emcc_args, native_args, native_exec, lib_builder, has_output_parser): + 
def build( + self, parent, filename, args, shared_args, emcc_args, native_args, native_exec, lib_builder, has_output_parser + ): native_args = native_args or [] shared_args = shared_args or [] self.parent = parent @@ -170,11 +180,14 @@ def build(self, parent, filename, args, shared_args, emcc_args, native_args, nat native_args = native_args + lib_builder(self.name, native=True, env_init=env) if not native_exec: compiler = self.cxx if filename.endswith('cpp') else self.cc - cmd = compiler + [ - '-fno-math-errno', - filename, - '-o', filename + '.native' - ] + self.args + shared_args + native_args + clang_native.get_clang_native_args() + cmd = ( + compiler + + ['-fno-math-errno', filename, '-o', filename + '.native'] + + self.args + + shared_args + + native_args + + clang_native.get_clang_native_args() + ) # print(cmd) run_process(cmd, env=clang_native.get_clang_native_env()) else: @@ -196,11 +209,9 @@ def get_size_text(self): def run_binaryen_opts(filename, opts): - run_process([ - os.path.join(building.get_binaryen_bin(), 'wasm-opt', '--all-features'), - filename, - '-o', filename - ] + opts) + run_process( + [os.path.join(building.get_binaryen_bin(), 'wasm-opt', '--all-features'), filename, '-o', filename] + opts + ) class EmscriptenBenchmarker(Benchmarker): @@ -213,7 +224,9 @@ def __init__(self, name, engine, extra_args=None, env=None, binaryen_opts=None): self.env.update(env) self.binaryen_opts = binaryen_opts or [] - def build(self, parent, filename, args, shared_args, emcc_args, native_args, native_exec, lib_builder, has_output_parser): + def build( + self, parent, filename, args, shared_args, emcc_args, native_args, native_exec, lib_builder, has_output_parser + ): emcc_args = emcc_args or [] self.filename = filename llvm_root = self.env.get('LLVM') or config.LLVM_ROOT @@ -226,15 +239,24 @@ def build(self, parent, filename, args, shared_args, emcc_args, native_args, nat # This shouldn't be 'emcc_args += ...', because emcc_args is passed in as # a parameter and changes will be visible to the caller. 
emcc_args = emcc_args + lib_builder('js_' + llvm_root, native=False, env_init=env_init) - final = os.path.dirname(filename) + os.path.sep + self.name + ('_' if self.name else '') + os.path.basename(filename) + '.js' + final = ( + os.path.dirname(filename) + + os.path.sep + + self.name + + ('_' if self.name else '') + + os.path.basename(filename) + + '.js' + ) final = final.replace('.cpp', '') utils.delete_file(final) cmd = [ - EMCC, filename, + EMCC, + filename, OPTIMIZATIONS, '-sINITIAL_MEMORY=256MB', '-sENVIRONMENT=node,shell', - '-o', final + '-o', + final, ] + LLVM_FEATURE_FLAGS if shared_args: cmd += shared_args @@ -294,7 +316,9 @@ def __init__(self, name, engine, args=None, binaryen_opts=None): self.args = args or [OPTIMIZATIONS] self.binaryen_opts = binaryen_opts or [] - def build(self, parent, filename, args, shared_args, emcc_args, native_args, native_exec, lib_builder, has_output_parser): + def build( + self, parent, filename, args, shared_args, emcc_args, native_args, native_exec, lib_builder, has_output_parser + ): cheerp_args = [ '-fno-math-errno', ] @@ -303,20 +327,31 @@ def build(self, parent, filename, args, shared_args, emcc_args, native_args, nat if lib_builder: # build as "native" (so no emcc env stuff), but with all the cheerp stuff # set in the env - cheerp_args = cheerp_args + lib_builder(self.name, native=True, env_init={ - 'CC': CHEERP_BIN + 'clang', - 'CXX': CHEERP_BIN + 'clang++', - 'AR': CHEERP_BIN + '../libexec/cheerp-unknown-none-ar', - 'LD': CHEERP_BIN + 'clang', - 'NM': CHEERP_BIN + 'llvm-nm', - 'LDSHARED': CHEERP_BIN + 'clang', - 'RANLIB': CHEERP_BIN + '../libexec/cheerp-unknown-none-ranlib', - 'CXXFLAGS': "-Wno-c++11-narrowing", - 'CHEERP_PREFIX': CHEERP_BIN + '../', - }) + cheerp_args = cheerp_args + lib_builder( + self.name, + native=True, + env_init={ + 'CC': CHEERP_BIN + 'clang', + 'CXX': CHEERP_BIN + 'clang++', + 'AR': CHEERP_BIN + '../libexec/cheerp-unknown-none-ar', + 'LD': CHEERP_BIN + 'clang', + 'NM': CHEERP_BIN + 'llvm-nm', + 'LDSHARED': CHEERP_BIN + 'clang', + 'RANLIB': CHEERP_BIN + '../libexec/cheerp-unknown-none-ranlib', + 'CXXFLAGS': "-Wno-c++11-narrowing", + 'CHEERP_PREFIX': CHEERP_BIN + '../', + }, + ) if PROFILING: - cheerp_args += ['-cheerp-pretty-code'] # get function names, like emcc --profiling - final = os.path.dirname(filename) + os.path.sep + self.name + ('_' if self.name else '') + os.path.basename(filename) + '.js' + cheerp_args += ['-cheerp-pretty-code'] # get function names, like emcc --profiling + final = ( + os.path.dirname(filename) + + os.path.sep + + self.name + + ('_' if self.name else '') + + os.path.basename(filename) + + '.js' + ) final = final.replace('.cpp', '') utils.delete_file(final) dirs_to_delete = [] @@ -327,12 +362,18 @@ def build(self, parent, filename, args, shared_args, emcc_args, native_args, nat compiler = CHEERP_BIN + '/clang' else: compiler = CHEERP_BIN + '/clang++' - cmd = [compiler] + cheerp_args + [ - '-cheerp-linear-heap-size=256', - '-cheerp-secondary-output-file=' + final.replace('.js', '.wasm'), - filename, - '-o', final - ] + shared_args + cmd = ( + [compiler] + + cheerp_args + + [ + '-cheerp-linear-heap-size=256', + '-cheerp-secondary-output-file=' + final.replace('.js', '.wasm'), + filename, + '-o', + final, + ] + + shared_args + ) # print(' '.join(cmd)) run_process(cmd, stdout=PIPE, stderr=PIPE) self.filename = final @@ -360,7 +401,7 @@ def get_output_files(self): named_benchmarkers = { 'clang': NativeBenchmarker('clang', [CLANG_CC], [CLANG_CXX]), - 'gcc': NativeBenchmarker('gcc', ['gcc', 
'-no-pie'], ['g++', '-no-pie']), + 'gcc': NativeBenchmarker('gcc', ['gcc', '-no-pie'], ['g++', '-no-pie']), 'size': SizeBenchmarker('size'), 'v8': EmscriptenBenchmarker('v8', aot_v8), 'v8-lto': EmscriptenBenchmarker('v8-lto', aot_v8, ['-flto']), @@ -371,7 +412,7 @@ def get_output_files(self): 'cherp-v8': CheerpBenchmarker('cheerp-v8-wasm', aot_v8), # TODO: ensure no baseline compiler is used, see v8 'sm': EmscriptenBenchmarker('sm', config.SPIDERMONKEY_ENGINE), - 'cherp-sm': CheerpBenchmarker('cheerp-sm-wasm', config.SPIDERMONKEY_ENGINE) + 'cherp-sm': CheerpBenchmarker('cheerp-sm-wasm', config.SPIDERMONKEY_ENGINE), } for name in EMTEST_BENCHMARKERS.split(','): @@ -382,7 +423,7 @@ def get_output_files(self): class benchmark(common.RunnerCore): save_dir = True - stats = [] # type: ignore + stats = [] # type: ignore @classmethod def setUpClass(cls): @@ -398,7 +439,10 @@ def setUpClass(cls): pass try: with common.chdir(os.path.expanduser('~/Dev/mozilla-central')): - fingerprint.append('sm: ' + [line for line in run_process(['hg', 'tip'], stdout=PIPE).stdout.splitlines() if 'changeset' in line][0]) + fingerprint.append( + 'sm: ' + + [line for line in run_process(['hg', 'tip'], stdout=PIPE).stdout.splitlines() if 'changeset' in line][0] + ) except Exception: pass fingerprint.append('llvm: ' + config.LLVM_ROOT) @@ -408,11 +452,7 @@ def setUpClass(cls): def tearDownClass(cls): super().tearDownClass() if cls.stats: - output = { - 'version': 1, - 'git_hash': '', - 'results': cls.stats - } + output = {'version': 1, 'git_hash': '', 'results': cls.stats} utils.write_file('stats.json', json.dumps(output, indent=2) + '\n') # avoid depending on argument reception from the commandline @@ -422,21 +462,36 @@ def hardcode_arguments(self, code): main_pattern = 'int main(int argc, char **argv)' assert main_pattern in code code = code.replace(main_pattern, 'int benchmark_main(int argc, char **argv)') - code += ''' + code += ( + ''' int main() { int newArgc = 2; char* newArgv[] = { (char*)"./program.exe", (char*)"%s" }; int ret = benchmark_main(newArgc, newArgv); return ret; } - ''' % DEFAULT_ARG + ''' + % DEFAULT_ARG + ) return code - def do_benchmark(self, name, src, expected_output='FAIL', args=None, - emcc_args=None, native_args=None, shared_args=None, - force_c=False, reps=TEST_REPS, native_exec=None, - output_parser=None, args_processor=None, lib_builder=None, - skip_native=False): + def do_benchmark( + self, + name, + src, + expected_output='FAIL', + args=None, + emcc_args=None, + native_args=None, + shared_args=None, + force_c=False, + reps=TEST_REPS, + native_exec=None, + output_parser=None, + args_processor=None, + lib_builder=None, + skip_native=False, + ): if not benchmarkers: raise Exception('error, no benchmarkers') @@ -459,22 +514,24 @@ def do_benchmark(self, name, src, expected_output='FAIL', args=None, reps = 0 baseline = b print('Running benchmarker: %s: %s' % (b.__class__.__name__, b.name)) - b.build(self, filename, args, shared_args, emcc_args, native_args, native_exec, lib_builder, has_output_parser=output_parser is not None) + b.build( + self, + filename, + args, + shared_args, + emcc_args, + native_args, + native_exec, + lib_builder, + has_output_parser=output_parser is not None, + ) b.bench(args, output_parser, reps, expected_output) recorded_stats = b.display(baseline) if recorded_stats: self.add_stats(name, recorded_stats) def add_stats(self, name, stats): - self.stats.append({ - 'key': { - 'test': name, - 'units': 'bytes' - }, - 'measurements': { - 'stats': stats - } - }) + 
self.stats.append({'key': {'test': name, 'units': 'bytes'}, 'measurements': {'stats': stats}}) def test_primes(self, check=True): src = r''' @@ -516,7 +573,12 @@ def test_primes(self, check=True): return 0; } ''' - self.do_benchmark('primes' if check else 'primes-nocheck', src, 'lastprime:' if check else '', shared_args=['-DCHECK'] if check else []) + self.do_benchmark( + 'primes' if check else 'primes-nocheck', + src, + 'lastprime:' if check else '', + shared_args=['-DCHECK'] if check else [], + ) # Also interesting to test it without the printfs which allow checking the output. Without # printf, code size is dominated by the runtime itself (the compiled code is just a few lines). @@ -750,7 +812,7 @@ def test_fannkuch(self): case 5: n = 12; break; default: printf("error: %d\\n", arg); return -1; } - ''' + ''', ) assert 'switch(arg)' in src self.do_benchmark('fannkuch', src, 'Pfannkuchen(') @@ -824,7 +886,9 @@ def test_corrections64(self): def fasta(self, name, double_rep): src = read_file(test_file('fasta.cpp')).replace('double', double_rep) - src = src.replace(' const size_t n = ( argc > 1 ) ? atoi( argv[1] ) : 512;', ''' + src = src.replace( + ' const size_t n = ( argc > 1 ) ? atoi( argv[1] ) : 512;', + ''' int n; int arg = argc > 1 ? argv[1][0] - '0' : 3; switch(arg) { @@ -836,7 +900,8 @@ def fasta(self, name, double_rep): case 5: n = 19000000*10; break; default: printf("error: %d\\n", arg); return -1; } - ''') + ''', + ) assert 'switch(arg)' in src self.do_benchmark('fasta', src, '') @@ -854,8 +919,7 @@ def test_skinning(self): def test_havlak(self): src = read_file(test_file('havlak.cpp')) # This runs many recursive calls (DFS) and thus needs a larger stack - self.do_benchmark('havlak', src, 'Found', shared_args=['-std=c++11'], - emcc_args=['-sSTACK_SIZE=1MB']) + self.do_benchmark('havlak', src, 'Found', shared_args=['-std=c++11'], emcc_args=['-sSTACK_SIZE=1MB']) def test_base64(self): src = read_file(test_file('base64.c')) @@ -870,111 +934,216 @@ def test_zzz_linpack(self): def output_parser(output): mflops = re.search(r'Unrolled Double Precision ([\d\.]+) Mflops', output).group(1) return 10000.0 / float(mflops) - self.do_benchmark('linpack_double', read_file(test_file('benchmark/linpack2.c')), '''Unrolled Double Precision''', force_c=True, output_parser=output_parser) + + self.do_benchmark( + 'linpack_double', + read_file(test_file('benchmark/linpack2.c')), + '''Unrolled Double Precision''', + force_c=True, + output_parser=output_parser, + ) # Benchmarks the synthetic performance of calling native functions. @non_core def test_native_functions(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('native_functions', read_file(test_file('benchmark/benchmark_ffis.cpp')), 'Total time:', - output_parser=output_parser, - # Not minimal because this uses functions in library_browsers.js - emcc_args=['-sMINIMAL_RUNTIME=0'], - shared_args=['-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'native_functions', + read_file(test_file('benchmark/benchmark_ffis.cpp')), + 'Total time:', + output_parser=output_parser, + # Not minimal because this uses functions in library_browsers.js + emcc_args=['-sMINIMAL_RUNTIME=0'], + shared_args=['-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) # Benchmarks the synthetic performance of calling function pointers. 
@non_core def test_native_function_pointers(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('native_functions', read_file(test_file('benchmark/benchmark_ffis.cpp')), 'Total time:', - output_parser=output_parser, - # Not minimal because this uses functions in library_browsers.js - emcc_args=['-sMINIMAL_RUNTIME=0'], - shared_args=['-DBENCHMARK_FUNCTION_POINTER=1', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'native_functions', + read_file(test_file('benchmark/benchmark_ffis.cpp')), + 'Total time:', + output_parser=output_parser, + # Not minimal because this uses functions in library_browsers.js + emcc_args=['-sMINIMAL_RUNTIME=0'], + shared_args=['-DBENCHMARK_FUNCTION_POINTER=1', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) # Benchmarks the synthetic performance of calling "foreign" JavaScript functions. @non_core def test_foreign_functions(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('foreign_functions', read_file(test_file('benchmark/benchmark_ffis.cpp')), 'Total time:', - output_parser=output_parser, - # Not minimal because this uses functions in library_browsers.js - emcc_args=['--js-library', test_file('benchmark/benchmark_ffis.js'), '-sMINIMAL_RUNTIME=0'], - shared_args=['-DBENCHMARK_FOREIGN_FUNCTION=1', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'foreign_functions', + read_file(test_file('benchmark/benchmark_ffis.cpp')), + 'Total time:', + output_parser=output_parser, + # Not minimal because this uses functions in library_browsers.js + emcc_args=['--js-library', test_file('benchmark/benchmark_ffis.js'), '-sMINIMAL_RUNTIME=0'], + shared_args=['-DBENCHMARK_FOREIGN_FUNCTION=1', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) @non_core def test_memcpy_128b(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('memcpy_128b', read_file(test_file('benchmark/benchmark_memcpy.cpp')), 'Total time:', output_parser=output_parser, shared_args=['-DMAX_COPY=128', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'memcpy_128b', + read_file(test_file('benchmark/benchmark_memcpy.cpp')), + 'Total time:', + output_parser=output_parser, + shared_args=['-DMAX_COPY=128', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) @non_core def test_memcpy_4k(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('memcpy_4k', read_file(test_file('benchmark/benchmark_memcpy.cpp')), 'Total time:', output_parser=output_parser, shared_args=['-DMIN_COPY=128', '-DMAX_COPY=4096', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'memcpy_4k', + read_file(test_file('benchmark/benchmark_memcpy.cpp')), + 'Total time:', + output_parser=output_parser, + shared_args=['-DMIN_COPY=128', '-DMAX_COPY=4096', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) @non_core def test_memcpy_16k(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('memcpy_16k', read_file(test_file('benchmark/benchmark_memcpy.cpp')), 'Total time:', output_parser=output_parser, shared_args=['-DMIN_COPY=4096', '-DMAX_COPY=16384', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'memcpy_16k', + 
read_file(test_file('benchmark/benchmark_memcpy.cpp')), + 'Total time:', + output_parser=output_parser, + shared_args=['-DMIN_COPY=4096', '-DMAX_COPY=16384', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) @non_core def test_memcpy_1mb(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('memcpy_1mb', read_file(test_file('benchmark/benchmark_memcpy.cpp')), 'Total time:', output_parser=output_parser, shared_args=['-DMIN_COPY=16384', '-DMAX_COPY=1048576', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'memcpy_1mb', + read_file(test_file('benchmark/benchmark_memcpy.cpp')), + 'Total time:', + output_parser=output_parser, + shared_args=['-DMIN_COPY=16384', '-DMAX_COPY=1048576', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) @non_core def test_memcpy_16mb(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('memcpy_16mb', read_file(test_file('benchmark/benchmark_memcpy.cpp')), 'Total time:', output_parser=output_parser, shared_args=['-DMIN_COPY=1048576', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'memcpy_16mb', + read_file(test_file('benchmark/benchmark_memcpy.cpp')), + 'Total time:', + output_parser=output_parser, + shared_args=['-DMIN_COPY=1048576', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) @non_core def test_memset_128b(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('memset_128b', read_file(test_file('benchmark/benchmark_memset.cpp')), 'Total time:', output_parser=output_parser, shared_args=['-DMAX_COPY=128', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'memset_128b', + read_file(test_file('benchmark/benchmark_memset.cpp')), + 'Total time:', + output_parser=output_parser, + shared_args=['-DMAX_COPY=128', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) @non_core def test_memset_4k(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('memset_4k', read_file(test_file('benchmark/benchmark_memset.cpp')), 'Total time:', output_parser=output_parser, shared_args=['-DMIN_COPY=128', '-DMAX_COPY=4096', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'memset_4k', + read_file(test_file('benchmark/benchmark_memset.cpp')), + 'Total time:', + output_parser=output_parser, + shared_args=['-DMIN_COPY=128', '-DMAX_COPY=4096', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) @non_core def test_memset_16k(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('memset_16k', read_file(test_file('benchmark/benchmark_memset.cpp')), 'Total time:', output_parser=output_parser, shared_args=['-DMIN_COPY=4096', '-DMAX_COPY=16384', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'memset_16k', + read_file(test_file('benchmark/benchmark_memset.cpp')), + 'Total time:', + output_parser=output_parser, + shared_args=['-DMIN_COPY=4096', '-DMAX_COPY=16384', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) @non_core def test_memset_1mb(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('memset_1mb', read_file(test_file('benchmark/benchmark_memset.cpp')), 'Total time:', output_parser=output_parser, 
shared_args=['-DMIN_COPY=16384', '-DMAX_COPY=1048576', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'memset_1mb', + read_file(test_file('benchmark/benchmark_memset.cpp')), + 'Total time:', + output_parser=output_parser, + shared_args=['-DMIN_COPY=16384', '-DMAX_COPY=1048576', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) @non_core def test_memset_16mb(self): def output_parser(output): return float(re.search(r'Total time: ([\d\.]+)', output).group(1)) - self.do_benchmark('memset_16mb', read_file(test_file('benchmark/benchmark_memset.cpp')), 'Total time:', output_parser=output_parser, shared_args=['-DMIN_COPY=1048576', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')]) + + self.do_benchmark( + 'memset_16mb', + read_file(test_file('benchmark/benchmark_memset.cpp')), + 'Total time:', + output_parser=output_parser, + shared_args=['-DMIN_COPY=1048576', '-DBUILD_FOR_SHELL', '-I' + test_file('benchmark')], + ) def test_malloc_multithreading(self): # Multithreaded malloc test. For emcc we use mimalloc here. src = read_file(test_file('other/test_malloc_multithreading.cpp')) # TODO measure with different numbers of cores and not fixed 4 - self.do_benchmark('malloc_multithreading', src, 'Done.', shared_args=['-DWORKERS=4', '-pthread'], emcc_args=['-sEXIT_RUNTIME', '-sMALLOC=mimalloc']) + self.do_benchmark( + 'malloc_multithreading', + src, + 'Done.', + shared_args=['-DWORKERS=4', '-pthread'], + emcc_args=['-sEXIT_RUNTIME', '-sMALLOC=mimalloc'], + ) def test_matrix_multiply(self): def output_parser(output): return float(re.search(r'Total elapsed: ([\d\.]+)', output).group(1)) - self.do_benchmark('matrix_multiply', read_file(test_file('matrix_multiply.cpp')), 'Total elapsed:', output_parser=output_parser, shared_args=['-I' + test_file('benchmark')]) + + self.do_benchmark( + 'matrix_multiply', + read_file(test_file('matrix_multiply.cpp')), + 'Total elapsed:', + output_parser=output_parser, + shared_args=['-I' + test_file('benchmark')], + ) def lua(self, benchmark, expected, output_parser=None, args_processor=None): self.emcc_args.remove('-Werror') @@ -983,13 +1152,34 @@ def lua(self, benchmark, expected, output_parser=None, args_processor=None): def lib_builder(name, native, env_init): # We force recomputation for the native benchmarker because this benchmark # uses native_exec=True, so we need to copy the native executable - return self.get_library(os.path.join('third_party', 'lua_native' if native else 'lua'), [os.path.join('src', 'lua.o'), os.path.join('src', 'liblua.a')], make=['make', 'generic'], configure=None, native=native, cache_name_extra=name, env_init=env_init, force_rebuild=native) - - self.do_benchmark('lua_' + benchmark, '', expected, - force_c=True, args=[benchmark + '.lua', DEFAULT_ARG], - emcc_args=['--embed-file', benchmark + '.lua', '-sFORCE_FILESYSTEM', '-sMINIMAL_RUNTIME=0'], # not minimal because of files - lib_builder=lib_builder, native_exec=os.path.join('building', 'third_party', 'lua_native', 'src', 'lua'), - output_parser=output_parser, args_processor=args_processor) + return self.get_library( + os.path.join('third_party', 'lua_native' if native else 'lua'), + [os.path.join('src', 'lua.o'), os.path.join('src', 'liblua.a')], + make=['make', 'generic'], + configure=None, + native=native, + cache_name_extra=name, + env_init=env_init, + force_rebuild=native, + ) + + self.do_benchmark( + 'lua_' + benchmark, + '', + expected, + force_c=True, + args=[benchmark + '.lua', DEFAULT_ARG], + emcc_args=[ + '--embed-file', + benchmark + '.lua', + 
'-sFORCE_FILESYSTEM', + '-sMINIMAL_RUNTIME=0', + ], # not minimal because of files + lib_builder=lib_builder, + native_exec=os.path.join('building', 'third_party', 'lua_native', 'src', 'lua'), + output_parser=output_parser, + args_processor=args_processor, + ) def test_zzz_lua_scimark(self): def output_parser(output): @@ -1006,30 +1196,62 @@ def test_zzz_zlib(self): src = read_file(test_file('benchmark/test_zlib_benchmark.c')) def lib_builder(name, native, env_init): - return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'], native=native, cache_name_extra=name, env_init=env_init) - - self.do_benchmark('zlib', src, 'ok.', - force_c=True, shared_args=['-I' + test_file('third_party/zlib')], lib_builder=lib_builder) + return self.get_library( + os.path.join('third_party', 'zlib'), + os.path.join('libz.a'), + make_args=['libz.a'], + native=native, + cache_name_extra=name, + env_init=env_init, + ) + + self.do_benchmark( + 'zlib', src, 'ok.', force_c=True, shared_args=['-I' + test_file('third_party/zlib')], lib_builder=lib_builder + ) def test_zzz_coremark(self): src = read_file(test_file('third_party/coremark/core_main.c')) def lib_builder(name, native, env_init): - return self.get_library('third_party/coremark', [os.path.join('coremark.a')], configure=None, native=native, cache_name_extra=name, env_init=env_init) + return self.get_library( + 'third_party/coremark', + [os.path.join('coremark.a')], + configure=None, + native=native, + cache_name_extra=name, + env_init=env_init, + ) def output_parser(output): iters_sec = re.search(r'Iterations/Sec : ([\d\.]+)', output).group(1) return 100000.0 / float(iters_sec) - self.do_benchmark('coremark', src, 'Correct operation validated.', shared_args=['-I' + test_file('third_party/coremark')], lib_builder=lib_builder, output_parser=output_parser, force_c=True) + self.do_benchmark( + 'coremark', + src, + 'Correct operation validated.', + shared_args=['-I' + test_file('third_party/coremark')], + lib_builder=lib_builder, + output_parser=output_parser, + force_c=True, + ) def test_zzz_box2d(self): src = read_file(test_file('benchmark/test_box2d_benchmark.cpp')) def lib_builder(name, native, env_init): - return self.get_library(os.path.join('third_party', 'box2d'), ['box2d.a'], configure=None, native=native, cache_name_extra=name, env_init=env_init) - - self.do_benchmark('box2d', src, 'frame averages', shared_args=['-I' + test_file('third_party/box2d')], lib_builder=lib_builder) + return self.get_library( + os.path.join('third_party', 'box2d'), + ['box2d.a'], + configure=None, + native=native, + cache_name_extra=name, + env_init=env_init, + ) + + self.do_benchmark( + 'box2d', src, 'frame averages', shared_args=['-I' + test_file('third_party/box2d')], lib_builder=lib_builder + ) def test_zzz_bullet(self): self.emcc_args.remove('-Werror') @@ -1038,40 +1260,64 @@ def test_zzz_bullet(self): src += read_file(test_file('third_party/bullet/Demos/Benchmarks/main.cpp')) def lib_builder(name, native, env_init): - return self.get_library(str(Path('third_party/bullet')), - [Path('src/.libs/libBulletDynamics.a'), - Path('src/.libs/libBulletCollision.a'), - Path('src/.libs/libLinearMath.a')], - # The --host parameter is needed for 2 reasons: - # 1) bullet in it's configure.ac tries to do platform detection and will fail on unknown platforms - # 2) configure will try to compile and run a test file to check if the C compiler is sane. As Cheerp - # will generate a wasm file (which cannot be run), configure will fail. 
Passing `--host` enables - # cross compile mode, which lets configure complete happily. - configure_args=['--disable-demos', '--disable-dependency-tracking', '--host=i686-unknown-linux'], native=native, cache_name_extra=name, env_init=env_init) - - self.do_benchmark('bullet', src, '\nok.\n', - shared_args=['-I' + test_file('third_party/bullet/src'), '-I' + test_file('third_party/bullet/Demos/Benchmarks')], - lib_builder=lib_builder) + return self.get_library( + str(Path('third_party/bullet')), + [ + Path('src/.libs/libBulletDynamics.a'), + Path('src/.libs/libBulletCollision.a'), + Path('src/.libs/libLinearMath.a'), + ], + # The --host parameter is needed for 2 reasons: + # 1) bullet in it's configure.ac tries to do platform detection and will fail on unknown platforms + # 2) configure will try to compile and run a test file to check if the C compiler is sane. As Cheerp + # will generate a wasm file (which cannot be run), configure will fail. Passing `--host` enables + # cross compile mode, which lets configure complete happily. + configure_args=['--disable-demos', '--disable-dependency-tracking', '--host=i686-unknown-linux'], + native=native, + cache_name_extra=name, + env_init=env_init, + ) + + self.do_benchmark( + 'bullet', + src, + '\nok.\n', + shared_args=['-I' + test_file('third_party/bullet/src'), '-I' + test_file('third_party/bullet/Demos/Benchmarks')], + lib_builder=lib_builder, + ) def test_zzz_lzma(self): src = read_file(test_file('benchmark/test_lzma_benchmark.c')) def lib_builder(name, native, env_init): - return self.get_library(os.path.join('third_party', 'lzma'), [os.path.join('lzma.a')], configure=None, native=native, cache_name_extra=name, env_init=env_init) + return self.get_library( + os.path.join('third_party', 'lzma'), + [os.path.join('lzma.a')], + configure=None, + native=native, + cache_name_extra=name, + env_init=env_init, + ) self.do_benchmark('lzma', src, 'ok.', shared_args=['-I' + test_file('third_party/lzma')], lib_builder=lib_builder) def test_zzz_sqlite(self): src = read_file(test_file('third_party/sqlite/sqlite3.c')) + read_file(test_file('sqlite/speedtest1.c')) - self.do_benchmark('sqlite', src, 'TOTAL...', - native_args=['-ldl', '-pthread'], - shared_args=['-I' + test_file('third_party/sqlite')], - # not minimal because of files - emcc_args=['-sFILESYSTEM', '-sMINIMAL_RUNTIME=0'], - force_c=True) + self.do_benchmark( + 'sqlite', + src, + 'TOTAL...', + native_args=['-ldl', '-pthread'], + shared_args=['-I' + test_file('third_party/sqlite')], + # not minimal because of files + emcc_args=['-sFILESYSTEM', '-sMINIMAL_RUNTIME=0'], + force_c=True, + ) def test_zzz_poppler(self): - utils.write_file('pre.js', ''' + utils.write_file( + 'pre.js', + ''' var benchmarkArgument = %s; var benchmarkArgumentToPageCount = { '0': 0, @@ -1107,17 +1353,27 @@ def test_zzz_poppler(self): out(files.length + ' files emitted, total output size: ' + totalSize + ', hashed printout: ' + hash); }; } - ''' % DEFAULT_ARG) + ''' + % DEFAULT_ARG, + ) def lib_builder(name, native, env_init): return self.get_poppler_library(env_init=env_init) # TODO: Fix poppler native build and remove skip_native=True - self.do_benchmark('poppler', '', 'hashed printout', - shared_args=['-I' + test_file('poppler/include'), - '-I' + test_file('freetype/include')], - emcc_args=['-sFILESYSTEM', '--pre-js=pre.js', '--embed-file', - test_file('poppler/emscripten_html5.pdf') + '@input.pdf', - '-sERROR_ON_UNDEFINED_SYMBOLS=0', - '-sMINIMAL_RUNTIME=0'], # not minimal because of files - lib_builder=lib_builder, 
skip_native=True) + self.do_benchmark( + 'poppler', + '', + 'hashed printout', + shared_args=['-I' + test_file('poppler/include'), '-I' + test_file('freetype/include')], + emcc_args=[ + '-sFILESYSTEM', + '--pre-js=pre.js', + '--embed-file', + test_file('poppler/emscripten_html5.pdf') + '@input.pdf', + '-sERROR_ON_UNDEFINED_SYMBOLS=0', + '-sMINIMAL_RUNTIME=0', + ], # not minimal because of files + lib_builder=lib_builder, + skip_native=True, + ) diff --git a/test/test_browser.py b/test/test_browser.py index 135e69d0203b8..280cde1fb431a 100644 --- a/test/test_browser.py +++ b/test/test_browser.py @@ -69,7 +69,7 @@ def do_GET(s): end = min(len(data) - 1, end) length = end - start + 1 s.sendheaders([], length) - s.wfile.write(data[start:end + 1]) + s.wfile.write(data[start : end + 1]) # CORS preflight makes OPTIONS requests which we need to account for. expectedConns = 22 @@ -92,8 +92,7 @@ def metafunc(self, wasmfs, *args, **kwargs): else: f(self, *args, **kwargs) - parameterize(metafunc, {'': (False,), - 'wasmfs': (True,)}) + parameterize(metafunc, {'': (False,), 'wasmfs': (True,)}) return metafunc @@ -108,8 +107,7 @@ def metafunc(self, proxied, *args, **kwargs): self.proxied = proxied f(self, *args, **kwargs) - parameterize(metafunc, {'': (False,), - 'proxied': (True,)}) + parameterize(metafunc, {'': (False,), 'proxied': (True,)}) return metafunc @@ -141,7 +139,9 @@ def decorator(f): def decorated(self, *args, **kwargs): self.set_setting('WASMFS', 0) f(self, *args, **kwargs) + return decorated + return decorator @@ -198,8 +198,7 @@ def decorated(self, threads, *args, **kwargs): self.emcc_args += ['-pthread'] f(self, *args, **kwargs) - parameterize(decorated, {'': (False,), - 'pthreads': (True,)}) + parameterize(decorated, {'': (False,), 'pthreads': (True,)}) return decorated @@ -219,10 +218,14 @@ def decorated(self, *args, **kwargs): return decorator -requires_graphics_hardware = skipExecIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), 'This test requires graphics hardware') +requires_graphics_hardware = skipExecIf( + os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), 'This test requires graphics hardware' +) requires_webgpu = unittest.skipIf(os.getenv('EMTEST_LACKS_WEBGPU'), "This test requires WebGPU to be available") requires_sound_hardware = skipExecIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), 'This test requires sound hardware') -requires_offscreen_canvas = skipExecIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), 'This test requires a browser with OffscreenCanvas') +requires_offscreen_canvas = skipExecIf( + os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), 'This test requires a browser with OffscreenCanvas' +) class browser(BrowserCore): @@ -245,14 +248,18 @@ def setUp(self): def require_jspi(self): if not is_chrome(): - self.skipTest(f'Current browser ({EMTEST_BROWSER}) does not support JSPI. Only chromium-based browsers ({CHROMIUM_BASED_BROWSERS}) support JSPI today.') + self.skipTest( + f'Current browser ({EMTEST_BROWSER}) does not support JSPI. Only chromium-based browsers ({CHROMIUM_BASED_BROWSERS}) support JSPI today.' + ) super(browser, self).require_jspi() def post_manual_reftest(self): assert os.path.exists('reftest.js') shutil.copy(test_file('browser_reporting.js'), '.') html = read_file('test.html') - html = html.replace('', ''' + html = html.replace( + '', + ''' -''') +''', + ) create_file('test.html', html) def make_reftest(self, expected): # make sure the pngs used here have no color correction, using e.g. 
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile shutil.copy(expected, 'expected.png') - create_file('reftest.js', f''' + create_file( + 'reftest.js', + f''' const reftestRebaseline = {common.EMTEST_REBASELINE}; - ''' + read_file(test_file('reftest.js'))) + ''' + + read_file(test_file('reftest.js')), + ) def reftest(self, filename, reference, reference_slack=0, *args, **kwargs): - """Special case of `btest` that uses reference image - """ + """Special case of `btest` that uses reference image""" reference = find_browser_test_file(reference) assert 'expected' not in kwargs expected = [str(i) for i in range(0, reference_slack + 1)] @@ -308,7 +319,7 @@ def test_sdl1_in_emscripten_nonstrict_mode(self): def test_sdl1(self): self.reftest('hello_world_sdl.c', 'htmltest.png', args=['-lSDL', '-lGL']) - self.reftest('hello_world_sdl.c', 'htmltest.png', args=['-sUSE_SDL', '-lGL']) # is the default anyhow + self.reftest('hello_world_sdl.c', 'htmltest.png', args=['-sUSE_SDL', '-lGL']) # is the default anyhow def test_sdl1_es6(self): self.reftest('hello_world_sdl.c', 'htmltest.png', args=['-sUSE_SDL', '-lGL', '-sEXPORT_ES6']) @@ -324,7 +335,9 @@ def test_zzz_html_source_map(self): # multiple mapped lines. in other words, if the program consists of a # single 'throw' statement, browsers may just map any thrown exception to # that line, because it will be the only mapped line. - create_file('src.cpp', r''' + create_file( + 'src.cpp', + r''' #include int main() { @@ -335,7 +348,8 @@ def test_zzz_html_source_map(self): printf("done\n"); return 0; } - ''') + ''', + ) # use relative paths when calling emcc, because file:// URIs can only load # sourceContent when the maps are relative paths self.compile_btest('src.cpp', ['-o', 'src.html', '-gsource-map']) @@ -352,8 +366,10 @@ def test_zzz_html_source_map(self): ''') def test_emscripten_log(self): - self.btest_exit('emscripten_log/emscripten_log.cpp', - args=['-Wno-deprecated-pragma', '--pre-js', path_from_root('src/emscripten-source-map.min.js'), '-gsource-map']) + self.btest_exit( + 'emscripten_log/emscripten_log.cpp', + args=['-Wno-deprecated-pragma', '--pre-js', path_from_root('src/emscripten-source-map.min.js'), '-gsource-map'], + ) @also_with_wasmfs def test_preload_file(self): @@ -365,11 +381,13 @@ def test_preload_file(self): def make_main(path): print('make main at', path) - path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string. + path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string. # TODO: change this when wasmfs supports relative paths. 
if self.get_setting('WASMFS'): path = "/" + path - create_file('main.c', r''' + create_file( + 'main.c', + r''' #include #include #include @@ -385,7 +403,9 @@ def make_main(path): assert(strcmp("load me right before", buf) == 0); return 0; } - ''' % path) + ''' + % path, + ) test_cases = [ # (source preload-file string, file on target FS to load) @@ -404,7 +424,8 @@ def make_main(path): ("somefile.txt@/directory/file.txt", "directory/file.txt"), (absolute_src_path + "@/directory/file.txt", "directory/file.txt"), ("some@@file.txt@other.txt", "other.txt"), - ("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")] + ("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt"), + ] for srcpath, dstpath in test_cases: print('Testing', srcpath, dstpath) @@ -428,13 +449,13 @@ def make_main(path): # By absolute path - make_main('somefile.txt') # absolute becomes relative + make_main('somefile.txt') # absolute becomes relative self.btest_exit('main.c', args=['--preload-file', absolute_src_path]) # Test subdirectory handling with asset packaging. delete_dir('assets') ensure_dir('assets/sub/asset1') - ensure_dir('assets/sub/asset1/.git') # Test adding directory that shouldn't exist. + ensure_dir('assets/sub/asset1/.git') # Test adding directory that shouldn't exist. ensure_dir('assets/sub/asset2') create_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''') create_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''') @@ -442,7 +463,9 @@ def make_main(path): absolute_assets_src_path = 'assets' def make_main_two_files(path1, path2, nonexistingpath): - create_file('main.c', r''' + create_file( + 'main.c', + r''' #include #include #include @@ -466,17 +489,40 @@ def make_main_two_files(path1, path2, nonexistingpath): return 0; } - ''' % (path1, path2, nonexistingpath)) + ''' + % (path1, path2, nonexistingpath), + ) test_cases = [ # (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS) - ("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"), - ("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"), + ( + "assets", + "assets/sub/asset1/file1.txt", + "assets/sub/asset2/file2.txt", + "assets/sub/asset1/.git/shouldnt_be_embedded.txt", + ), + ( + "assets/", + "assets/sub/asset1/file1.txt", + "assets/sub/asset2/file2.txt", + "assets/sub/asset1/.git/shouldnt_be_embedded.txt", + ), ("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"), ("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"), ("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"), - (absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"), - (absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")] + ( + absolute_assets_src_path + "@/", + "/sub/asset1/file1.txt", + "/sub/asset2/file2.txt", + "/sub/asset1/.git/shouldnt_be_embedded.txt", + ), + ( + absolute_assets_src_path + "@/assets", + "/assets/sub/asset1/file1.txt", + "/assets/sub/asset2/file2.txt", + "assets/sub/asset1/.git/shouldnt_be_embedded.txt", + ), + ] for test in 
test_cases: (srcpath, dstpath1, dstpath2, nonexistingpath) = test @@ -486,29 +532,39 @@ def make_main_two_files(path1, path2, nonexistingpath): # Should still work with -o subdir/.. - make_main('somefile.txt') # absolute becomes relative + make_main('somefile.txt') # absolute becomes relative ensure_dir('dirrey') - self.compile_btest('main.c', ['--preload-file', absolute_src_path, '-o', 'dirrey/page.html'], reporting=Reporting.JS_ONLY) + self.compile_btest( + 'main.c', ['--preload-file', absolute_src_path, '-o', 'dirrey/page.html'], reporting=Reporting.JS_ONLY + ) self.run_browser('dirrey/page.html', '/report_result?exit:0') # With FS.preloadFile - create_file('pre.js', ''' + create_file( + 'pre.js', + ''' // we need --use-preload-plugins for this. Module.preRun = () => FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); - ''') + ''', + ) make_main('someotherfile.txt') self.btest_exit('main.c', args=['--pre-js', 'pre.js', '--use-preload-plugins']) # Tests that user .html shell files can manually download .data files created with --preload-file cmdline. - @parameterized({ - '': ([],), - 'pthreads': (['-pthread', '-sPROXY_TO_PTHREAD', '-sEXIT_RUNTIME'],), - }) + @parameterized( + { + '': ([],), + 'pthreads': (['-pthread', '-sPROXY_TO_PTHREAD', '-sEXIT_RUNTIME'],), + } + ) def test_preload_file_with_manual_data_download(self, args): create_file('file.txt', 'Hello!') - self.compile_btest('browser/test_manual_download_data.c', ['-sEXIT_RUNTIME', '-o', 'out.js', '--preload-file', 'file.txt@/file.txt'] + args) + self.compile_btest( + 'browser/test_manual_download_data.c', + ['-sEXIT_RUNTIME', '-o', 'out.js', '--preload-file', 'file.txt@/file.txt'] + args, + ) shutil.copy(test_file('browser/test_manual_download_data.html'), '.') # Move .data file out of server root to ensure that getPreloadedPackage is actually used @@ -522,7 +578,9 @@ def test_preload_file_with_manual_data_download(self, args): # correctly escaping the names. def test_output_file_escaping(self): self.set_setting('EXIT_RUNTIME') - tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that. + tricky_part = ( + '\'' if WINDOWS else '\' and "' + ) # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that. 
d = 'dir with ' + tricky_part abs_d = os.path.abspath(d) @@ -531,7 +589,9 @@ def test_output_file_escaping(self): create_file(os.path.join(d, txt), 'load me right before') src = os.path.join(d, 'file with ' + tricky_part + '.c') - create_file(src, r''' + create_file( + src, + r''' #include #include #include @@ -546,26 +606,44 @@ def test_output_file_escaping(self): assert(strcmp("load me right before", buf) == 0); return 0; } - ''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))) + ''' + % (txt.replace('\'', '\\\'').replace('"', '\\"')), + ) data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data') data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js') abs_txt = os.path.join(abs_d, txt) - self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file]) + self.run_process( + [ + FILE_PACKAGER, + data_file, + '--use-preload-cache', + '--indexedDB-name=testdb', + '--preload', + abs_txt + '@' + txt, + '--js-output=' + data_js_file, + ] + ) page_file = os.path.join(d, 'file with ' + tricky_part + '.html') abs_page_file = os.path.abspath(page_file) - self.compile_btest(src, ['--pre-js', data_js_file, '-o', abs_page_file, '-sFORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY) + self.compile_btest( + src, ['--pre-js', data_js_file, '-o', abs_page_file, '-sFORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY + ) self.run_browser(page_file, '/report_result?exit:0') - @parameterized({ - '0': (0,), - '1mb': (1 * 1024 * 1024,), - '100mb': (100 * 1024 * 1024,), - '150mb': (150 * 1024 * 1024,), - }) + @parameterized( + { + '0': (0,), + '1mb': (1 * 1024 * 1024,), + '100mb': (100 * 1024 * 1024,), + '150mb': (150 * 1024 * 1024,), + } + ) def test_preload_caching(self, extra_size): self.set_setting('EXIT_RUNTIME') - create_file('main.c', r''' + create_file( + 'main.c', + r''' #include #include #include @@ -584,9 +662,13 @@ def test_preload_caching(self, extra_size): assert(strcmp("load me right before", buf) == 0); return checkPreloadResults(); } - ''' % 'somefile.txt') + ''' + % 'somefile.txt', + ) - create_file('test.js', ''' + create_file( + 'test.js', + ''' addToLibrary({ checkPreloadResults: function() { var cached = 0; @@ -599,7 +681,8 @@ def test_preload_caching(self, extra_size): return cached; } }); - ''') + ''', + ) # test caching of various sizes, including sizes higher than 128MB which is # chrome's limit on IndexedDB item sizes, see @@ -609,7 +692,20 @@ def test_preload_caching(self, extra_size): self.skipTest('chrome bug') create_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size)) print('size:', os.path.getsize('somefile.txt')) - self.compile_btest('main.c', ['--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-sALLOW_MEMORY_GROWTH'], reporting=Reporting.JS_ONLY) + self.compile_btest( + 'main.c', + [ + '--use-preload-cache', + '--js-library', + 'test.js', + '--preload-file', + 'somefile.txt', + '-o', + 'page.html', + '-sALLOW_MEMORY_GROWTH', + ], + reporting=Reporting.JS_ONLY, + ) self.run_browser('page.html', '/report_result?exit:0') self.run_browser('page.html', '/report_result?exit:1') @@ -619,7 +715,9 @@ def test_preload_caching_indexeddb_name(self): def make_main(path): print(path) - create_file('main.c', r''' + create_file( + 'main.c', + r''' #include #include #include @@ -640,9 +738,13 @@ def make_main(path): assert(strcmp("load me right before", buf) == 0); return 
checkPreloadResults(); } - ''' % path) + ''' + % path, + ) - create_file('test.js', ''' + create_file( + 'test.js', + ''' addToLibrary({ checkPreloadResults: function() { var cached = 0; @@ -655,11 +757,26 @@ def make_main(path): return cached; } }); - ''') + ''', + ) make_main('somefile.txt') - self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js']) - self.compile_btest('main.c', ['--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-sFORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY) + self.run_process( + [ + FILE_PACKAGER, + 'somefile.data', + '--use-preload-cache', + '--indexedDB-name=testdb', + '--preload', + 'somefile.txt', + '--js-output=' + 'somefile.js', + ] + ) + self.compile_btest( + 'main.c', + ['--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-sFORCE_FILESYSTEM'], + reporting=Reporting.JS_ONLY, + ) self.run_browser('page.html', '/report_result?exit:0') self.run_browser('page.html', '/report_result?exit:1') @@ -668,7 +785,9 @@ def test_multifile(self): ensure_dir('subdirr/moar') create_file('subdirr/data1.txt', '1214141516171819') create_file('subdirr/moar/data2.txt', '3.14159265358979') - create_file('main.c', r''' + create_file( + 'main.c', + r''' #include #include #include @@ -692,7 +811,8 @@ def test_multifile(self): return 0; } - ''') + ''', + ) # by individual files self.btest_exit('main.c', args=['--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt']) @@ -712,7 +832,11 @@ def test_custom_file_package_url(self): # you would add this in your own custom html file etc., and not by # modifying the existing shell in this manner default_shell = read_file(path_from_root('src/shell.html')) - create_file('shell.html', default_shell.replace('var Module = {', ''' + create_file( + 'shell.html', + default_shell.replace( + 'var Module = {', + ''' var Module = { locateFile: function(path, prefix) { if (path.endsWith(".wasm")) { @@ -721,8 +845,12 @@ def test_custom_file_package_url(self): return "cdn/" + path; } }, - ''')) - create_file('main.c', r''' + ''', + ), + ) + create_file( + 'main.c', + r''' #include #include #include @@ -739,10 +867,15 @@ def test_custom_file_package_url(self): return 0; } - ''') + ''', + ) self.set_setting('EXIT_RUNTIME') - self.compile_btest('main.c', ['--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'], reporting=Reporting.JS_ONLY) + self.compile_btest( + 'main.c', + ['--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'], + reporting=Reporting.JS_ONLY, + ) shutil.move('test.data', Path('cdn/test.data')) self.run_browser('test.html', '/report_result?exit:0') @@ -750,7 +883,9 @@ def test_missing_data_throws_error(self): def setup(assetLocalization): self.clear() create_file('data.txt', 'data') - create_file('main.c', r''' + create_file( + 'main.c', + r''' #include #include #include @@ -758,8 +893,11 @@ def setup(assetLocalization): // This code should never be executed in terms of missing required dependency file. return 0; } - ''') - create_file('on_window_error_shell.html', r''' + ''', + ) + create_file( + 'on_window_error_shell.html', + r'''


@@ -776,7 +914,9 @@ def setup(assetLocalization): window.addEventListener('error', handler); window.addEventListener('unhandledrejection', handler); var Module = { - locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}}, + locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + + assetLocalization + + r'''" + path;}}, print: (function() { var element = document.getElementById('output'); return function(text) { element.innerHTML += text.replace('\n', '
<br>', 'g') + '<br>
';}; @@ -786,23 +926,30 @@ def setup(assetLocalization): {{{ SCRIPT }}} - ''') + ''', + ) def test(): # test test missing file should run xhr.onload with status different than 200, 304 or 206 setup("") - self.compile_btest('main.c', ['--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']) + self.compile_btest( + 'main.c', ['--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'] + ) shutil.move('test.data', 'missing.data') self.run_browser('test.html', '/report_result?1') # test unknown protocol should go through xhr.onerror setup("unknown_protocol://") - self.compile_btest('main.c', ['--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']) + self.compile_btest( + 'main.c', ['--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'] + ) self.run_browser('test.html', '/report_result?1') # test wrong protocol and port setup("https://localhost:8800/") - self.compile_btest('main.c', ['--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']) + self.compile_btest( + 'main.c', ['--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'] + ) self.run_browser('test.html', '/report_result?1') test() @@ -827,105 +974,175 @@ def test_sdl_image(self): # load an image file, get pixel data. Also O2 coverage for --preload-file shutil.copy(test_file('screenshot.jpg'), '.') src = test_file('browser/test_sdl_image.c') - for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'), - ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]: - self.btest_exit(src, args=[ - '-O2', '-lSDL', '-lGL', - '--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins' - ]) + for dest, dirname, basename in [ + ('screenshot.jpg', '/', 'screenshot.jpg'), + ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg'), + ]: + self.btest_exit( + src, + args=[ + '-O2', + '-lSDL', + '-lGL', + '--preload-file', + dest, + '-DSCREENSHOT_DIRNAME="' + dirname + '"', + '-DSCREENSHOT_BASENAME="' + basename + '"', + '--use-preload-plugins', + ], + ) @also_with_wasmfs def test_sdl_image_jpeg(self): shutil.copy(test_file('screenshot.jpg'), 'screenshot.jpeg') - self.btest_exit('test_sdl_image.c', args=[ - '--preload-file', 'screenshot.jpeg', - '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins', - '-lSDL', '-lGL', - ]) + self.btest_exit( + 'test_sdl_image.c', + args=[ + '--preload-file', + 'screenshot.jpeg', + '-DSCREENSHOT_DIRNAME="/"', + '-DSCREENSHOT_BASENAME="screenshot.jpeg"', + '--use-preload-plugins', + '-lSDL', + '-lGL', + ], + ) def test_sdl_image_webp(self): shutil.copy(test_file('screenshot.webp'), '.') - self.btest_exit('test_sdl_image.c', args=[ - '--preload-file', 'screenshot.webp', - '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.webp"', '--use-preload-plugins', - '-lSDL', '-lGL', - ]) + self.btest_exit( + 'test_sdl_image.c', + args=[ + '--preload-file', + 'screenshot.webp', + '-DSCREENSHOT_DIRNAME="/"', + '-DSCREENSHOT_BASENAME="screenshot.webp"', + '--use-preload-plugins', + '-lSDL', + '-lGL', + ], + ) @also_with_wasmfs @also_with_proxying def test_sdl_image_prepare(self): # load an image file, get pixel data. 
shutil.copy(test_file('screenshot.jpg'), 'screenshot.not') - self.reftest('test_sdl_image_prepare.c', 'screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL']) - - @parameterized({ - '': ([],), - # add testing for closure on preloaded files + ENVIRONMENT=web (we must not - # emit any node.js code here, see - # https://github.com/emscripten-core/emscripten/issues/14486 - 'closure_webonly': (['--closure', '1', '-sENVIRONMENT=web'],) - }) + self.reftest( + 'test_sdl_image_prepare.c', 'screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'] + ) + + @parameterized( + { + '': ([],), + # add testing for closure on preloaded files + ENVIRONMENT=web (we must not + # emit any node.js code here, see + # https://github.com/emscripten-core/emscripten/issues/14486 + 'closure_webonly': (['--closure', '1', '-sENVIRONMENT=web'],), + } + ) def test_sdl_image_prepare_data(self, args): # load an image file, get pixel data. shutil.copy(test_file('screenshot.jpg'), 'screenshot.not') - self.reftest('test_sdl_image_prepare_data.c', 'screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'] + args) + self.reftest( + 'test_sdl_image_prepare_data.c', + 'screenshot.jpg', + args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'] + args, + ) def test_sdl_image_must_prepare(self): # load an image file, get pixel data. shutil.copy(test_file('screenshot.jpg'), 'screenshot.jpg') - self.reftest('test_sdl_image_must_prepare.c', 'screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL']) + self.reftest( + 'test_sdl_image_must_prepare.c', 'screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'] + ) def test_sdl_stb_image(self): # load an image file, get pixel data. shutil.copy(test_file('screenshot.jpg'), 'screenshot.not') - self.reftest('test_sdl_stb_image.c', 'screenshot.jpg', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']) + self.reftest( + 'test_sdl_stb_image.c', + 'screenshot.jpg', + args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'], + ) def test_sdl_stb_image_bpp(self): # load grayscale image without alpha shutil.copy(test_file('browser/test_sdl-stb-bpp1.png'), 'screenshot.not') - self.reftest('test_sdl_stb_image.c', 'test_sdl-stb-bpp1.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']) + self.reftest( + 'test_sdl_stb_image.c', + 'test_sdl-stb-bpp1.png', + args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'], + ) # load grayscale image with alpha self.clear() shutil.copy(test_file('browser/test_sdl-stb-bpp2.png'), 'screenshot.not') - self.reftest('test_sdl_stb_image.c', 'test_sdl-stb-bpp2.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']) + self.reftest( + 'test_sdl_stb_image.c', + 'test_sdl-stb-bpp2.png', + args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'], + ) # load RGB image self.clear() shutil.copy(test_file('browser/test_sdl-stb-bpp3.png'), 'screenshot.not') - self.reftest('test_sdl_stb_image.c', 'test_sdl-stb-bpp3.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']) + self.reftest( + 'test_sdl_stb_image.c', + 'test_sdl-stb-bpp3.png', + args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'], + ) # load RGBA image self.clear() shutil.copy(test_file('browser/test_sdl-stb-bpp4.png'), 'screenshot.not') - self.reftest('test_sdl_stb_image.c', 'test_sdl-stb-bpp4.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', 
'-lSDL', '-lGL']) + self.reftest( + 'test_sdl_stb_image.c', + 'test_sdl-stb-bpp4.png', + args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'], + ) def test_sdl_stb_image_data(self): # load an image file, get pixel data. shutil.copy(test_file('screenshot.jpg'), 'screenshot.not') - self.reftest('test_sdl_stb_image_data.c', 'screenshot.jpg', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL']) + self.reftest( + 'test_sdl_stb_image_data.c', + 'screenshot.jpg', + args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'], + ) def test_sdl_stb_image_cleanup(self): shutil.copy(test_file('screenshot.jpg'), 'screenshot.not') - self.btest_exit('test_sdl_stb_image_cleanup.c', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler']) - - @parameterized({ - '': ([],), - 'safe_heap': (['-sSAFE_HEAP'],), - 'safe_heap_O2': (['-sSAFE_HEAP', '-O2'],), - }) + self.btest_exit( + 'test_sdl_stb_image_cleanup.c', + args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'], + ) + + @parameterized( + { + '': ([],), + 'safe_heap': (['-sSAFE_HEAP'],), + 'safe_heap_O2': (['-sSAFE_HEAP', '-O2'],), + } + ) def test_sdl_canvas(self, args): self.btest_exit('test_sdl_canvas.c', args=['-sLEGACY_GL_EMULATION', '-lSDL', '-lGL'] + args) @proxied def test_sdl_canvas_proxy(self): create_file('data.txt', 'datum') - self.reftest('test_sdl_canvas_proxy.c', 'test_sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL']) + self.reftest( + 'test_sdl_canvas_proxy.c', + 'test_sdl_canvas_proxy.png', + args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], + ) @requires_graphics_hardware def test_glgears_proxy_jstarget(self): # test .js target with --proxy-worker; emits 2 js files, client and worker - self.compile_btest('hello_world_gles_proxy.c', ['-o', 'test.js', '--proxy-to-worker', '-sGL_TESTING', '-lGL', '-lglut']) + self.compile_btest( + 'hello_world_gles_proxy.c', ['-o', 'test.js', '--proxy-to-worker', '-sGL_TESTING', '-lGL', '-lglut'] + ) shell_with_script('shell_minimal.html', 'test.html', '') self.make_reftest(test_file('gears.png')) self.post_manual_reftest() @@ -937,20 +1154,26 @@ def test_sdl_canvas_alpha(self): create_file('flag_0.js', "Module['arguments'] = ['-0'];") self.reftest('test_sdl_canvas_alpha.c', 'test_sdl_canvas_alpha.png', args=['-lSDL', '-lGL'], reference_slack=12) - self.reftest('test_sdl_canvas_alpha.c', 'test_sdl_canvas_alpha_flag_0.png', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference_slack=12) - - @parameterized({ - '': ([],), - 'eventhandler': (['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER'],), - }) - @parameterized({ - '': ([],), - 'asyncify': (['-DTEST_SLEEP', '-sASSERTIONS', '-sSAFE_HEAP', '-sASYNCIFY'],), - }) - @parameterized({ - '': (False,), - 'delay': (True,) - }) + self.reftest( + 'test_sdl_canvas_alpha.c', + 'test_sdl_canvas_alpha_flag_0.png', + args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], + reference_slack=12, + ) + + @parameterized( + { + '': ([],), + 'eventhandler': (['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER'],), + } + ) + @parameterized( + { + '': ([],), + 'asyncify': (['-DTEST_SLEEP', '-sASSERTIONS', '-sSAFE_HEAP', '-sASYNCIFY'],), + } + ) + @parameterized({'': (False,), 'delay': (True,)}) def test_sdl_key(self, defines, async_, delay): if delay: settimeout_start = 'setTimeout(function() {' @@ -958,7 +1181,9 @@ def test_sdl_key(self, defines, async_, delay): else: settimeout_start = '' 
settimeout_end = '' - create_file('pre.js', ''' + create_file( + 'pre.js', + ''' function keydown(c) { %s simulateKeyDown(c); @@ -970,12 +1195,20 @@ def test_sdl_key(self, defines, async_, delay): simulateKeyUp(c); %s } - ''' % (settimeout_start, settimeout_end, settimeout_start, settimeout_end)) - self.btest_exit('test_sdl_key.c', 223092870, args=defines + async_ + ['--pre-js', test_file('browser/fake_events.js'), '--pre-js=pre.js', '-lSDL', '-lGL']) + ''' + % (settimeout_start, settimeout_end, settimeout_start, settimeout_end), + ) + self.btest_exit( + 'test_sdl_key.c', + 223092870, + args=defines + async_ + ['--pre-js', test_file('browser/fake_events.js'), '--pre-js=pre.js', '-lSDL', '-lGL'], + ) def test_sdl_key_proxy(self): shutil.copy(test_file('browser/fake_events.js'), '.') - create_file('pre.js', ''' + create_file( + 'pre.js', + ''' Module.postRun = () => { function doOne() { Module._one(); @@ -983,11 +1216,14 @@ def test_sdl_key_proxy(self): } setTimeout(doOne, 1000/60); } - ''') + ''', + ) def post(): html = read_file('test.html') - html = html.replace('', ''' + html = html.replace( + '', + ''' -''') +''', + ) create_file('test.html', html) - self.btest_exit('test_sdl_key_proxy.c', 223092870, args=['--proxy-to-worker', '--pre-js', 'pre.js', '-lSDL', '-lGL', '-sRUNTIME_DEBUG'], post_build=post) + self.btest_exit( + 'test_sdl_key_proxy.c', + 223092870, + args=['--proxy-to-worker', '--pre-js', 'pre.js', '-lSDL', '-lGL', '-sRUNTIME_DEBUG'], + post_build=post, + ) def test_canvas_focus(self): self.btest_exit('test_canvas_focus.c', args=['--pre-js', test_file('browser/fake_events.js')]) @@ -1009,7 +1251,9 @@ def test_canvas_focus(self): def test_keydown_preventdefault_proxy(self): def post(): html = read_file('test.html') - html = html.replace('', ''' + html = html.replace( + '', + ''' -''') +''', + ) create_file('test.html', html) @@ -1031,7 +1276,9 @@ def post(): self.btest_exit('browser/test_keydown_preventdefault_proxy.c', 300, args=['--proxy-to-worker'], post_build=post) def test_sdl_text(self): - create_file('pre.js', ''' + create_file( + 'pre.js', + ''' Module.postRun = () => { function doOne() { Module._one(); @@ -1039,15 +1286,22 @@ def test_sdl_text(self): } setTimeout(doOne, 1000/60); } - ''') + ''', + ) - self.btest_exit('test_sdl_text.c', args=['--pre-js', 'pre.js', '--pre-js', test_file('browser/fake_events.js'), '-lSDL', '-lGL']) + self.btest_exit( + 'test_sdl_text.c', args=['--pre-js', 'pre.js', '--pre-js', test_file('browser/fake_events.js'), '-lSDL', '-lGL'] + ) def test_sdl_mouse(self): - self.btest_exit('test_sdl_mouse.c', args=['-O2', '--minify=0', '--pre-js', test_file('browser/fake_events.js'), '-lSDL', '-lGL']) + self.btest_exit( + 'test_sdl_mouse.c', args=['-O2', '--minify=0', '--pre-js', test_file('browser/fake_events.js'), '-lSDL', '-lGL'] + ) def test_sdl_mouse_offsets(self): - create_file('page.html', ''' + create_file( + 'page.html', + '''